text
stringlengths
12
1.05M
repo_name
stringlengths
5
86
path
stringlengths
4
191
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
12
1.05M
keyword
listlengths
1
23
text_hash
stringlengths
64
64
from pymol.wizard import Wizard from pymol import cmd import pymol import copy default_map = [ '', '', ''] default_level = [ 1.0, 3.0, -3.0] default_radius = 8.0 default_track = 0 class Density(Wizard): def __init__(self,_self=cmd): self.cmd = _self self.cmd.unpick() Wizard.__init__(self,_self) # mode selection subsystem self.radius = default_radius self.map = copy.deepcopy(default_map) self.level = copy.deepcopy(default_level) self.track = copy.deepcopy(default_track) self.avail_maps = [] self.menu['radius'] = [ [1, '4.0 A Radius','cmd.get_wizard().set_radius(4)'], [1, '5.0 A Radius','cmd.get_wizard().set_radius(5)'], [1, '6.0 A Radius','cmd.get_wizard().set_radius(6)'], [1, '8.0 A Radius','cmd.get_wizard().set_radius(8)'], [1, '10.0 A Radius','cmd.get_wizard().set_radius(10)'], [1, '15.0 A Radius','cmd.get_wizard().set_radius(15)'], [1, '20.0 A Radius','cmd.get_wizard().set_radius(20)'], [1, '50.0 A Radius','cmd.get_wizard().set_radius(50)'], ] self.menu['map0'] = [] self.menu['map1'] = [] self.menu['map2'] = [] level_menu = lambda x:[ [1, '1.0 sigma','cmd.get_wizard().set_level(%d,1.0)'%x], [1, '1.5 sigma','cmd.get_wizard().set_level(%d,1.5)'%x], [1, '2.0 sigma','cmd.get_wizard().set_level(%d,2.0)'%x], [1, '3.0 sigma','cmd.get_wizard().set_level(%d,3.0)'%x], [1, '5.0 sigma','cmd.get_wizard().set_level(%d,5.0)'%x], [1, '-3.0 sigma','cmd.get_wizard().set_level(%d,-3.0)'%x]] self.menu['level0'] = level_menu(0) self.menu['level1'] = level_menu(1) self.menu['level2'] = level_menu(2) self.menu['track'] = [ [ 1, "Track & Zoom", 'cmd.get_wizard().set_track(0)'], [ 1, "Track & Center", 'cmd.get_wizard().set_track(1)'], [ 1, "Track & Set Origin", 'cmd.get_wizard().set_track(2)'], [ 1, "Track Off", 'cmd.get_wizard().set_track(3)'], ] if (self.map[0] == '') and (self.map[1] == '') and (self.map[2]==''): for a in self.cmd.get_names(): # automatically load first map we find if self.cmd.get_type(a)=='object:map': self.map[0]=a break self.update_map_menus() 
self.cmd.set_key('pgup',lambda c=cmd:c.get_wizard().next_res(d=-1)) self.cmd.set_key('pgdn',lambda c=cmd:c.get_wizard().next_res()) def update_map_menus(self): self.avail_maps = [] for a in self.cmd.get_names('objects'): if self.cmd.get_type(a)=='object:map': self.avail_maps.append(a) c = 0 for a in self.map: map_kee = 'map'+str(c) level_kee = 'level'+str(c) self.menu[map_kee] = [[2,'Select Map','']] for a in self.avail_maps: self.menu[map_kee].append([ 1,a,'cmd.get_wizard().set_map(%d,"%s")'%(c,a) ]) self.menu[map_kee].append([1,'(none)','cmd.get_wizard().set_map(%d,"")'%c]) c = c + 1 def set_track(self,track): self.track = track self.cmd.refresh_wizard() def set_level(self,map,level): self.level[map] = level self.update_maps() self.cmd.refresh_wizard() def set_map(self,map,map_name): self.map[map] = map_name self.cmd.refresh_wizard() def set_radius(self,radius): self.radius = radius self.update_maps() self.cmd.refresh_wizard() def update_maps(self,zoom=1): sele_name = "_dw" if sele_name not in cmd.get_names('selections'): sele_name = "center" if 1: save = self.cmd.get_setting_text('auto_zoom') self.cmd.set('auto_zoom',0,quiet=1) c = 0 for a in self.map: oname = 'w'+str(c+1)+'_'+a if oname not in self.cmd.get_names(): color = 1 else: color = 0 if len(a) and (a in self.cmd.get_names('objects')): if self.cmd.get_type(a)=='object:map': self.cmd.isomesh(oname,a,self.level[c], sele_name,self.radius,state=1) if color: if c == 0: self.cmd.color('blue',oname) elif c == 1: self.cmd.color('white',oname) else: self.cmd.color('magenta',oname) c = c + 1 save = self.cmd.set('auto_zoom',save,quiet=1) if self.track==0: if zoom: self.cmd.zoom(sele_name,self.radius,animate=0.67) elif self.track==1: if zoom: self.cmd.center(sele_name,animate=0.67) elif self.track==2: if zoom: self.cmd.origin(sele_name,animate=0.67) self.cmd.refresh_wizard() # generic set routines def zoom(self): if '_dw' in self.cmd.get_names('selections'): self.cmd.zoom("(_dw)",self.radius,animate=0.67) else: c = 0 
for a in self.map: oname = 'w'+str(c+1)+'_'+a if len(a): if a in self.cmd.get_names('objects'): self.cmd.zoom(oname,animate=0.67) c = c + 1 def get_panel(self): self.update_map_menus() return [ [ 1, 'Density Map Wizard',''], [ 2, 'Update Maps' , 'cmd.get_wizard().update_maps()'], [ 2, 'Zoom' , 'cmd.get_wizard().zoom()'], [ 2, 'Next Res. (PgDown)' , 'cmd.get_wizard().next_res()'], [ 2, 'Previous Res. (PgUp)' , 'cmd.get_wizard().next_res(d=-1)'], [ 3, "Radius: %3.1f A"%self.radius,'radius'], [ 3, "Map 1: "+self.map[0],'map0'], [ 3, " @ "+str(self.level[0])+" sigma",'level0'], [ 3, "Map 2: "+self.map[1],'map1'], [ 3, " @ "+str(self.level[1])+" sigma",'level1'], [ 3, "Map 3: "+self.map[2],'map2'], [ 3, " @ "+str(self.level[2])+" sigma",'level2'], [ 3, self.menu['track'][self.track][1], 'track' ], [ 2, 'Done','cmd.set_wizard()'], ] def cleanup(self): global default_radius,default_map,default_level default_radius = self.radius default_map = self.map default_level = self.level default_track = self.track self.clear() self.cmd.set_key('pgup',None) self.cmd.set_key('pgdn',None) def clear(self): pass def do_select(self,name): if self.track!=2: self.cmd.select("_dw",name,quiet=1) self.update_maps() self.cmd.deselect() def do_pick(self,bondFlag): global dist_count if not bondFlag: if self.track!=2: self.cmd.select("_dw","pk1",quiet=1) self.update_maps() self.cmd.unpick() def get_event_mask(self): return Wizard.event_mask_pick + Wizard.event_mask_select + Wizard.event_mask_position def do_position(self): if '_dw' not in cmd.get_names("selections"): self.update_maps(zoom=0) def next_res(self, d=1): # Donated by Tom Lee if not self.cmd.count_atoms('?_dw'): if self.cmd.count_atoms("?pk1"): self.cmd.select("_dw","pk1") if not ('_dw' in self.cmd.get_names('selections')): print " Density-Wizard: Please pick an atom first." 
else: obj = self.cmd.index('_dw')[0][0] a0 = self.cmd.get_model('_dw').atom[0] self.cmd.select("_res0", "byres (_dw)") res0 = self.cmd.get_model("_res0") atn = a0.name for a in res0.atom: if (a.name == 'CA'): atn = 'CA' break elif (a.name == 'C1*'): atn = 'C1*' break elif (a.name == 'C1\''): atn = 'C1\'' break n = self.cmd.select('_dw2', ''+obj+'/'+a0.segi+'/'+a0.chain+'/'+str(a0.resi_number+d)+'/'+atn) if (n == 0): # deal with gaps in sequence: self.cmd.select('_chain', ''+obj+'/'+a0.segi+'/'+a0.chain+'//'+atn) chain = self.cmd.get_model('_chain') resids = [] for a in chain.atom: resids.append(a.resi) if (a.resi_number == a0.resi_number): i = len(resids) - 1 next_i = i + d if ((next_i < 0) or (next_i >= len(resids))): print "Current residue is the end of a chain." else: n = self.cmd.select('_dw2', ''+obj+'/'+a0.segi+'/'+a0.chain+'/'+resids[next_i]+'/'+atn) if (n > 0): self.cmd.hide("labels", "?_dw") self.cmd.select('dw_resi', 'byres _dw2') self.cmd.disable('dw_resi') self.cmd.label('(_dw2)', '" %s %s/%s/" % (resn,chain,resi)') self.cmd.select('_dw','_dw2') self.cmd.delete('_dw2') self.update_maps()
gratefulfrog/lib
python/pymol/wizard/density.py
Python
gpl-2.0
10,162
[ "PyMOL" ]
34ee3be4c49ac0bd124bd66b1278376562905073c9969e28c8ccaec652ad178b
from collections import defaultdict, Sized from functools import partial import numpy as np from scipy.stats import rankdata import sklearn from sklearn.base import is_classifier, clone from sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection._search import BaseSearchCV from sklearn.utils import check_random_state from sklearn.utils.fixes import MaskedArray from sklearn.utils.validation import indexable, check_is_fitted from sklearn.metrics.scorer import check_scoring from . import Optimizer from .utils import point_asdict, dimensions_aslist, eval_callbacks from .space import check_dimension from .callbacks import check_callback class BayesSearchCV(BaseSearchCV): """Bayesian optimization over hyper parameters. BayesSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. Parameters are presented as a list of skopt.space.Dimension objects. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each search point. This object is assumed to implement the scikit-learn estimator api. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. search_spaces : dict, list of dict or list of tuple containing (dict, int). One of these cases: 1. dictionary, where keys are parameter names (strings) and values are skopt.space.Dimension instances (Real, Integer or Categorical) or any other valid value that defines skopt dimension (see skopt.Optimizer docs). 
Represents search space over parameters of the provided estimator. 2. list of dictionaries: a list of dictionaries, where every dictionary fits the description given in case 1 above. If a list of dictionary objects is given, then the search is performed sequentially for every parameter space with maximum number of evaluations set to self.n_iter. 3. list of (dict, int > 0): an extension of case 2 above, where first element of every tuple is a dictionary representing some search subspace, similarly as in case 2, and second element is a number of iterations that will be spent optimizing over this subspace. n_iter : int, default=50 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. Consider increasing ``n_points`` if you want to try more parameter settings in parallel. optimizer_kwargs : dict, optional Dict of arguments passed to :class:`Optimizer`. For example, ``{'base_estimator': 'RF'}`` would use a Random Forest surrogate instead of the default Gaussian Process. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. At maximum there are ``n_points`` times ``cv`` jobs available during each iteration. n_points : int, default=1 Number of parameter settings to sample in parallel. If this does not align with ``n_iter``, the last iteration will sample less points. See also :func:`~Optimizer.ask` pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. 
This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. 
return_train_score : boolean, default=False If ``'True'``, the ``cv_results_`` attribute will include training scores. Example ------- from skopt import BayesSearchCV # parameter ranges are specified by one of below from skopt.space import Real, Categorical, Integer from sklearn.datasets import load_iris from sklearn.svm import SVC from sklearn.model_selection import train_test_split X, y = load_iris(True) X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, random_state=0) # log-uniform: understand as search over p = exp(x) by varying x opt = BayesSearchCV( SVC(), { 'C': Real(1e-6, 1e+6, prior='log-uniform'), 'gamma': Real(1e-6, 1e+1, prior='log-uniform'), 'degree': Integer(1,8), 'kernel': Categorical(['linear', 'poly', 'rbf']), }, n_iter=32 ) # executes bayesian optimization opt.fit(X_train, y_train) # model can be saved, used for predictions or scoring print(opt.score(X_test, y_test)) Attributes ---------- cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance the below given table +--------------+-------------+-------------------+---+---------------+ | param_kernel | param_gamma | split0_test_score |...|rank_test_score| +==============+=============+===================+===+===============+ | 'rbf' | 0.1 | 0.8 |...| 2 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.2 | 0.9 |...| 1 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.3 | 0.7 |...| 1 | +--------------+-------------+-------------------+---+---------------+ will be represented by a ``cv_results_`` dict of:: { 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'], mask = False), 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), 'split0_test_score' : [0.8, 0.9, 0.7], 'split1_test_score' : [0.82, 0.5, 0.7], 'mean_test_score' : [0.81, 0.7, 0.7], 'std_test_score' : [0.02, 0.2, 0.], 'rank_test_score' : [3, 1, 1], 'split0_train_score' : [0.8, 0.9, 0.7], 'split1_train_score' : [0.82, 0.5, 0.7], 'mean_train_score' : [0.81, 0.7, 0.7], 'std_train_score' : [0.03, 0.03, 0.04], 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], 'mean_score_time' : [0.007, 0.06, 0.04, 0.04], 'std_score_time' : [0.001, 0.002, 0.003, 0.005], 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and ``std_score_time`` are all in seconds. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. 
best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. 
""" def __init__(self, estimator, search_spaces, optimizer_kwargs=None, n_iter=50, scoring=None, fit_params=None, n_jobs=1, n_points=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise', return_train_score=False): self.search_spaces = search_spaces self.n_iter = n_iter self.n_points = n_points self.random_state = random_state self.optimizer_kwargs = optimizer_kwargs self._check_search_space(self.search_spaces) super(BayesSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score, return_train_score=return_train_score) def _check_search_space(self, search_space): """Checks whether the search space argument is correct""" if len(search_space) == 0: raise ValueError( "The search_spaces parameter should contain at least one" "non-empty search space, got %s" % search_space ) # check if space is a single dict, convert to list if so if isinstance(search_space, dict): search_space = [search_space] # check if the structure of the space is proper if isinstance(search_space, list): # convert to just a list of dicts dicts_only = [] # 1. check the case when a tuple of space, n_iter is provided for elem in search_space: if isinstance(elem, tuple): if len(elem) != 2: raise ValueError( "All tuples in list of search spaces should have" "length 2, and contain (dict, int), got %s" % elem ) subspace, n_iter = elem if (not isinstance(n_iter, int)) or n_iter < 0: raise ValueError( "Number of iterations in search space should be" "positive integer, got %s in tuple %s " % (n_iter, elem) ) # save subspaces here for further checking dicts_only.append(subspace) elif isinstance(elem, dict): dicts_only.append(elem) else: raise TypeError( "A search space should be provided as a dict or" "tuple (dict, int), got %s" % elem) # 2. 
check all the dicts for correctness of contents for subspace in dicts_only: for k, v in subspace.items(): check_dimension(v) else: raise TypeError( "Search space should be provided as a dict or list of dict," "got %s" % search_space) # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV @property def best_score_(self): check_is_fitted(self, 'cv_results_') return self.cv_results_['mean_test_score'][self.best_index_] # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV @property def best_params_(self): check_is_fitted(self, 'cv_results_') return self.cv_results_['params'][self.best_index_] # copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV def _fit(self, X, y, groups, parameter_iterable): """ Actual fitting, performing the search over parameters. Taken from https://github.com/scikit-learn/scikit-learn/blob/0.18.X .../sklearn/model_selection/_search.py """ estimator = self.estimator cv = sklearn.model_selection._validation.check_cv( self.cv, y, classifier=is_classifier(estimator)) self.scorer_ = check_scoring( self.estimator, scoring=self.scoring) X, y, groups = indexable(X, y, groups) n_splits = cv.get_n_splits(X, y, groups) if self.verbose > 0 and isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(n_splits, n_candidates, n_candidates * n_splits)) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch cv_iter = list(cv.split(X, y, groups)) out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )(delayed(sklearn.model_selection._validation._fit_and_score)( clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, fit_params=self.fit_params, return_train_score=self.return_train_score, return_n_test_samples=True, return_times=True, return_parameters=True, error_score=self.error_score ) for parameters in parameter_iterable for train, test in 
cv_iter) # if one choose to see train score, "out" will contain train score info if self.return_train_score: (train_scores, test_scores, test_sample_counts, fit_time, score_time, parameters) = zip(*out) else: (test_scores, test_sample_counts, fit_time, score_time, parameters) = zip(*out) candidate_params = parameters[::n_splits] n_candidates = len(candidate_params) results = dict() def _store(key_name, array, weights=None, splits=False, rank=False): """A small helper to store the scores/times to the cv_results_""" array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits) if splits: for split_i in range(n_splits): results["split%d_%s" % (split_i, key_name)] = array[:, split_i] array_means = np.average(array, axis=1, weights=weights) results['mean_%s' % key_name] = array_means # Weighted std is not directly available in numpy array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)) results['std_%s' % key_name] = array_stds if rank: results["rank_%s" % key_name] = np.asarray( rankdata(-array_means, method='min'), dtype=np.int32) # Computed the (weighted) mean and std for test scores alone # NOTE test_sample counts (weights) remain the same for all candidates test_sample_counts = np.array(test_sample_counts[:n_splits], dtype=np.int) _store('test_score', test_scores, splits=True, rank=True, weights=test_sample_counts if self.iid else None) if self.return_train_score: _store('train_score', train_scores, splits=True) _store('fit_time', fit_time) _store('score_time', score_time) best_index = np.flatnonzero(results["rank_test_score"] == 1)[0] best_parameters = candidate_params[best_index] # Use one MaskedArray and mask all the places where the param is not # applicable for that candidate. 
Use defaultdict as each candidate may # not contain all the params param_results = defaultdict(partial( MaskedArray, np.empty(n_candidates,), mask=True, dtype=object)) for cand_i, params in enumerate(candidate_params): for name, value in params.items(): # An all masked empty array gets created for the key # `"param_%s" % name` at the first occurence of `name`. # Setting the value at an index also unmasks that index param_results["param_%s" % name][cand_i] = value results.update(param_results) # Store a list of param dicts at the key 'params' results['params'] = candidate_params self.cv_results_ = results self.best_index_ = best_index self.n_splits_ = n_splits if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best_parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self def _fit_best_model(self, X, y): """Fit the estimator copy with best parameters found to the provided data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], Target relative to X for classification or regression. Returns ------- self """ self.best_estimator_ = clone(self.estimator) self.best_estimator_.set_params(**self.best_params_) self.best_estimator_.fit(X, y, **(self.fit_params or {})) return self def _make_optimizer(self, params_space): """Instantiate skopt Optimizer class. Parameters ---------- params_space : dict Represents parameter search space. The keys are parameter names (strings) and values are skopt.space.Dimension instances, one of Real, Integer or Categorical. Returns ------- optimizer: Instance of the `Optimizer` class used for for search in some parameter space. 
""" kwargs = self.optimizer_kwargs_.copy() kwargs['dimensions'] = dimensions_aslist(params_space) optimizer = Optimizer(**kwargs) return optimizer def _step(self, X, y, search_space, optimizer, groups=None, n_points=1): """Generate n_jobs parameters and evaluate them in parallel. """ # get parameter values to evaluate params = optimizer.ask(n_points=n_points) # convert parameters to python native types params = [[np.asscalar(np.array(v)) for v in p] for p in params] # make lists into dictionaries params_dict = [point_asdict(search_space, p) for p in params] # HACK: self.cv_results_ is reset at every call to _fit, keep current all_cv_results = self.cv_results_ # HACK: this adds compatibility with different versions of sklearn refit = self.refit self.refit = False self._fit(X, y, groups, params_dict) self.refit = refit # merge existing and new cv_results_ for k in self.cv_results_: all_cv_results[k].extend(self.cv_results_[k]) self.cv_results_ = all_cv_results self.best_index_ = np.argmax(self.cv_results_['mean_test_score']) # feed the point and objective back into optimizer local_results = self.cv_results_['mean_test_score'][-len(params):] # optimizer minimizes objective, hence provide negative score return optimizer.tell(params, [-score for score in local_results]) @property def total_iterations(self): """ Count total iterations that will be taken to explore all subspaces with `fit` method. Returns ------- max_iter: int, total number of iterations to explore """ total_iter = 0 for elem in self.search_spaces: if isinstance(elem, tuple): space, n_iter = elem else: n_iter = self.n_iter total_iter += n_iter return total_iter def _run_search(self, x): pass def fit(self, X, y=None, groups=None, callback=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. 
y : array-like, shape = [n_samples] or [n_samples, n_output] Target relative to X for classification or regression (class labels should be integers or strings). groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. callback: [callable, list of callables, optional] If callable then `callback(res)` is called after each parameter combination tested. If list of callables, then each callable in the list is called. """ # check if space is a single dict, convert to list if so search_spaces = self.search_spaces if isinstance(search_spaces, dict): search_spaces = [search_spaces] callbacks = check_callback(callback) if self.optimizer_kwargs is None: self.optimizer_kwargs_ = {} else: self.optimizer_kwargs_ = dict(self.optimizer_kwargs) random_state = check_random_state(self.random_state) self.optimizer_kwargs_['random_state'] = random_state # Instantiate optimizers for all the search spaces. optimizers = [] for search_space in search_spaces: if isinstance(search_space, tuple): search_space = search_space[0] optimizers.append(self._make_optimizer(search_space)) self.optimizers_ = optimizers # will save the states of the optimizers self.cv_results_ = defaultdict(list) self.best_index_ = None self.multimetric_ = False n_points = self.n_points for search_space, optimizer in zip(search_spaces, optimizers): # if not provided with search subspace, n_iter is taken as # self.n_iter if isinstance(search_space, tuple): search_space, n_iter = search_space else: n_iter = self.n_iter # do the optimization for particular search space while n_iter > 0: # when n_iter < n_points points left for evaluation n_points_adjusted = min(n_iter, n_points) optim_result = self._step( X, y, search_space, optimizer, groups=groups, n_points=n_points_adjusted ) n_iter -= n_points if eval_callbacks(callbacks, optim_result): break # Refit the best model on the the whole dataset if self.refit: self._fit_best_model(X, y) return 
self
betatim/BlackBox
skopt/searchcv.py
Python
bsd-3-clause
27,200
[ "Gaussian" ]
a56a32c08191820c42956d27df4d1618b6e5cf018fb9f0f0ccd287c86a8bc428
import os.path as op import numpy as nm from sfepy import data_dir from sfepy.base.testing import TestCommon def init_vec(variables): return nm.random.rand(variables.di.ptr[-1]) def check_vec(self, vec, ii, ok, conds, variables): from sfepy.discrete.common.dof_info import expand_nodes_to_equations for var_name, var_conds in conds.group_by_variables().iteritems(): var = variables[var_name] for cond in var_conds: cond.canonize_dof_names(var.dofs) self.report('%d: %s %s: %s %s' % (ii, var.name, cond.name, cond.region.name, cond.dofs[0])) nods = var.field.get_dofs_in_region(cond.region) eq = expand_nodes_to_equations(nods, cond.dofs[0], var.dofs) off = variables.di.indx[var_name].start n_nod = len(nods) for cdof, dof_name in enumerate(cond.dofs[0]): idof = var.dofs.index(dof_name) eqs = eq[n_nod * cdof : n_nod * (cdof + 1)] _ok = nm.allclose(vec[off + eqs], idof, atol=1e-14, rtol=0.0) if not _ok: self.report(' %s: failed! (all of %s == %f)' % (dof_name, vec[off + eqs], idof)) ok = ok and _ok return ok class Test(TestCommon): @staticmethod def from_conf(conf, options): from sfepy.discrete import FieldVariable, Variables, Problem from sfepy.discrete.fem import Mesh, FEDomain, Field mesh = Mesh.from_file(data_dir + '/meshes/2d/square_unit_tri.mesh') domain = FEDomain('domain', mesh) omega = domain.create_region('Omega', 'all') domain.create_region('Left', 'vertices in (x < -0.499)', 'facet') domain.create_region('LeftStrip', 'vertices in (x < -0.499)' ' & (y > -0.199) & (y < 0.199)', 'facet') domain.create_region('LeftFix', 'r.Left -v r.LeftStrip', 'facet') domain.create_region('Right', 'vertices in (x > 0.499)', 'facet') domain.create_region('RightStrip', 'vertices in (x > 0.499)' ' & (y > -0.199) & (y < 0.199)', 'facet') domain.create_region('RightFix', 'r.Right -v r.RightStrip', 'facet') fu = Field.from_args('fu', nm.float64, 'vector', omega, approx_order=2) u = FieldVariable('u', 'unknown', fu) fp = Field.from_args('fp', nm.float64, 'scalar', omega, approx_order=2) p 
= FieldVariable('p', 'unknown', fp) pb = Problem('test', domain=domain, fields=[fu, fp], auto_conf=False, auto_solvers=False) test = Test(problem=pb, variables=Variables([u, p]), conf=conf, options=options) return test def test_ics(self): from sfepy.discrete.conditions import Conditions, InitialCondition variables = self.variables omega = self.problem.domain.regions['Omega'] all_ics = [] all_ics.append(InitialCondition('ic0', omega, {'p.all' : 0.0})) all_ics.append(InitialCondition('ic1', omega, {'u.1' : 1.0})) all_ics.append(InitialCondition('ic2', omega, {'u.all' : nm.array([0.0, 1.0])})) all_ics.append(InitialCondition('ic3', omega, {'p.0' : 0.0, 'u.0' : 0.0, 'u.1' : 1.0})) ok = True for ii, ics in enumerate(all_ics): if not isinstance(ics, list): ics = [ics] ics = Conditions(ics) variables.setup_initial_conditions(ics, functions=None) vec = init_vec(variables) variables.apply_ic(vec) ok = check_vec(self, vec, ii, ok, ics, variables) return ok def test_ebcs(self): from sfepy.discrete.conditions import Conditions, EssentialBC variables = self.variables regions = self.problem.domain.regions all_ebcs = [] all_ebcs.append(EssentialBC('fix_u1', regions['LeftFix'], {'u.all' : nm.array([0.0, 1.0])})) all_ebcs.append(EssentialBC('fix_u2', regions['LeftStrip'], {'u.0' : 0.0, 'u.1' : 1.0})) all_ebcs.append(EssentialBC('fix_p1', regions['RightFix'], {'p.all' : 0.0})) all_ebcs.append(EssentialBC('fix_p2', regions['RightStrip'], {'p.0' : 0.0})) all_ebcs.append([EssentialBC('fix_p3', regions['Right'], {'p.0' : 0.0}), EssentialBC('fix_u3', regions['Left'], {'u.0' : 0.0, 'u.1' : 1.0})]) ok = True for ii, bcs in enumerate(all_ebcs): if not isinstance(bcs, list): bcs = [bcs] ebcs = Conditions(bcs) variables.equation_mapping(ebcs=ebcs, epbcs=None, ts=None, functions=None) vec = init_vec(variables) variables.apply_ebc(vec) ok = check_vec(self, vec, ii, ok, ebcs, variables) return ok def test_epbcs(self): from sfepy.discrete import Function, Functions from sfepy.discrete.conditions 
import Conditions, PeriodicBC from sfepy.discrete.common.dof_info import expand_nodes_to_equations from sfepy.discrete.fem.periodic import match_y_line variables = self.variables regions = self.problem.domain.regions match_y_line = Function('match_y_line', match_y_line) pbc = PeriodicBC('pbc', [regions['LeftStrip'], regions['RightStrip']], {'u.[1,0]' : 'u.[0,1]'}, match='match_y_line') functions = Functions([match_y_line]) epbcs = Conditions([pbc]) variables.equation_mapping(ebcs=None, epbcs=epbcs, ts=None, functions=functions) vec = init_vec(variables) variables.apply_ebc(vec) var = variables['u'] var_bcs = epbcs.group_by_variables()['u'] bc = var_bcs['pbc'] bc.canonize_dof_names(var.dofs) nods0 = var.field.get_dofs_in_region(bc.regions[0]) nods1 = var.field.get_dofs_in_region(bc.regions[1]) coors0 = var.field.get_coor(nods0) coors1 = var.field.get_coor(nods1) i0, i1 = match_y_line(coors0, coors1) eq0 = expand_nodes_to_equations(nods0[i0], bc.dofs[0], var.dofs) eq1 = expand_nodes_to_equations(nods1[i1], bc.dofs[1], var.dofs) ok = True _ok = len(nm.setdiff1d(eq0, var.eq_map.master)) == 0 if not _ok: self.report('master equations mismatch! (set(%s) == set(%s))' % (eq0, var.eq_map.master)) ok = ok and _ok _ok = len(nm.setdiff1d(eq1, var.eq_map.slave)) == 0 if not _ok: self.report('slave equations mismatch! (set(%s) == set(%s))' % (eq1, var.eq_map.slave)) ok = ok and _ok off = variables.di.indx['u'].start _ok = nm.allclose(vec[off + eq0], vec[off + eq1], atol=1e-14, rtol=0.0) if not _ok: self.report('periodicity test failed! 
(%s == %s)' % (vec[off + eq0], vec[off + eq0])) ok = ok and _ok return ok def test_save_ebc(self): from sfepy.discrete import (FieldVariable, Integral, Equation, Equations, Problem) from sfepy.discrete.conditions import Conditions, EssentialBC from sfepy.terms import Term name = op.join(self.options.out_dir, op.splitext(op.basename(__file__))[0]) integral = Integral('i', order=1) u = self.variables['u'] v = FieldVariable('v', 'test', u.field, primary_var_name='u') p = self.variables['p'] q = FieldVariable('q', 'test', p.field, primary_var_name='p') regions = self.problem.domain.regions omega = regions['Omega'] # Problem.save_ebc() requires to have equations defined. t1 = Term.new('dw_lin_elastic(v, u)', integral, omega, v=v, u=u) t2 = Term.new('dw_laplace(q, p)', integral, omega, q=q, p=p) eq = Equation('aux', t1 + t2) eqs = Equations([eq]) pb = Problem('test', equations=eqs, auto_solvers=False) all_ebcs = [] all_ebcs.append(EssentialBC('fix_u1', regions['RightFix'], {'u.all' : nm.array([0.0, 1.0])})) all_ebcs.append(EssentialBC('fix_u2', regions['LeftStrip'], {'u.0' : 0.0, 'u.1' : 1.0})) all_ebcs.append(EssentialBC('fix_p1', regions['LeftFix'], {'p.all' : 0.0})) all_ebcs.append(EssentialBC('fix_p2', regions['RightStrip'], {'p.0' : 0.0})) ebcs = Conditions(all_ebcs) pb.time_update(ebcs=ebcs) pb.save_ebc(name + '_ebcs_f.vtk', ebcs=ebcs, force=True) pb.save_ebc(name + '_ebcs.vtk', ebcs=ebcs, default=-1, force=False) return True
RexFuzzle/sfepy
tests/test_conditions.py
Python
bsd-3-clause
9,711
[ "VTK" ]
feeeb6a5ba2ad93ddaf5035481e28d81c7be2a712f507162a5eaf360b2112b10
import re import sys import datetime import AlphaSubstValidation import AlphaSubstPrep import AlphaSubstBaseMLBootstrap import AlphaSubstScoring import random def stop_err(msg): "Write the error message and exit" sys.stderr.write(msg) sys.exit() #Retrieve Data OutputFile = sys.argv[1] Method = sys.argv[2] SubstModel = sys.argv[3] DoSingleBoot = sys.argv[4] SingleBootIterations = sys.argv[5] DoDoubleBoot = sys.argv[6] DoubleBootIterations = sys.argv[7] Sequences1 = sys.argv[8] TreeDefinition = sys.argv[9] FixKappa = sys.argv[10] KappaValue = sys.argv[11] Output_Format = sys.argv[12] #Stat_Results_Outfile = sys.argv[13] FixAlpha = 1 AlphaValue = 0 FixRho = 1 RhoValue = 0 AlgMethod = 0 MClock = 0 #Get galaxy location OutputSplit = re.compile('database') OutContents = OutputSplit.split(OutputFile) GalaxyLocation = OutContents[0] BaseMLLocation = "/home/universe/linux-i686/PAML/paml3.15/bin/" UserRandomKey = str(datetime.date.today()) + "-" + str(random.randrange(0,50000,1)) GetSE = 0 ExtraBaseML = 0 if int(Method) == 0: ExtraBaseML = 1 elif int(Method) == 1 and int(DoDoubleBoot) == 0: DoDoubleBoot = 1 DoSingleBoot = 0 DoubleBootIterations = 1 if int(DoSingleBoot) == 0: SingleBootIterations = 1 #Stat reporting - debugging Stat_Results_Iteration = [] Stat_Results_Sample = [] Stat_Results = "" #Initial Data Validation AlphaValid = AlphaSubstValidation.AlphaSubstValidation() ValidationErrors = AlphaValid.ValidateBaseMLData(DoSingleBoot,SingleBootIterations,TreeDefinition,Sequences1,DoDoubleBoot,DoubleBootIterations) if ValidationErrors != "": stop_err(ValidationErrors) #No projected errors; continue SequenceCount = AlphaValid.SequenceCount Group1AlignmentCount = AlphaValid.Group1AlignmentCount Group1Alignments = AlphaValid.Group1Alignments Group1AlignLength = AlphaValid.Group1AlignLength TotalSequenceLength1 = AlphaValid.TotalSequenceLength1 #HeaderList = AlphaValid.Group1Headers #Prepare the data for BaseML AlphaPrep = AlphaSubstPrep.AlphaSubstPrep() 
AlphaPrep.PrepBaseML(1,TreeDefinition,SequenceCount,0,UserRandomKey,BaseMLLocation,SubstModel,GetSE,0,0,GalaxyLocation,FixAlpha,AlphaValue,FixKappa,KappaValue,FixRho,RhoValue) BranchDescriptions = AlphaPrep.BranchDescriptions #Prepare scoring class AlphaSaveData = AlphaSubstScoring.AlphaSubstScoring(0,0,0,1,BranchDescriptions,"","","","","","","","",0,GetSE) #Perform Boostrapping and BaseML Functions AlphaSubstWork = AlphaSubstBaseMLBootstrap.AlphaSubstBaseMLBootstrap("") TimesFailed = 0 if Output_Format == "txt": AlphaSubstWork.StrapSequence(Group1Alignments,TotalSequenceLength1,SequenceCount,UserRandomKey,0) AlphaSubstWork.RunBaseML(BaseMLLocation,UserRandomKey,GalaxyLocation) Results = AlphaSubstWork.ReturnBaseMLFile(UserRandomKey,GalaxyLocation) #Get the results from baseml execution else: if int(ExtraBaseML) == 1 and int(DoDoubleBoot) == 0: #This performs an initial run on the data AlphaSubstWork.StrapSequence(Group1Alignments,TotalSequenceLength1,SequenceCount,UserRandomKey,0) AlphaSubstWork.RunBaseML(BaseMLLocation,UserRandomKey,GalaxyLocation) SuccessfulStrap = int(AlphaSubstWork.ScoreBaseML(BaseMLLocation,UserRandomKey,BranchDescriptions,GalaxyLocation,GetSE,ExtraBaseML,SubstModel)) #Get the results from baseml execution if SuccessfulStrap != 0: AlphaSubstWork.GotExtraBaseML = 1 AlphaSaveData.TransRatio = AlphaSubstWork.TransRatio AlphaSaveData.BaseFreq = AlphaSubstWork.BaseFreq AlphaSaveData.RateParameters = AlphaSubstWork.RateParameters AlphaSaveData.RateParameterHeaders = AlphaSubstWork.RateParameterHeaders AlphaSaveData.RateMatrix = AlphaSubstWork.RateMatrix else: stop_err("Alignment appears to be incompatible. 
Process terminated.") TimesFailed = 0; if int(DoDoubleBoot) == 0: Iterations = SingleBootIterations else: Iterations = DoubleBootIterations for IterationIndex in range(0,int(Iterations)): SuccessfulStrap = 0 if int(DoDoubleBoot) == 0: while int(SuccessfulStrap) == 0 and int(TimesFailed) <= 100: AlphaSubstWork.StrapSequence(Group1Alignments,TotalSequenceLength1,SequenceCount,UserRandomKey,DoSingleBoot) AlphaSubstWork.RunBaseML(BaseMLLocation,UserRandomKey,GalaxyLocation) SuccessfulStrap = int(AlphaSubstWork.ScoreBaseML(BaseMLLocation,UserRandomKey,BranchDescriptions,GalaxyLocation,GetSE,ExtraBaseML,SubstModel)) #Get the results from baseml execution #Save the baseml results to the score class if SuccessfulStrap != 0: AlphaSaveData.AddScores(AlphaSubstWork.BaseMLScores,AlphaSubstWork.BaseMLBranchDesc,1,AlphaSubstWork.SEScores) else: TimesFailed += 1 else: #Double Bootstrapping #Initialize a blank array for per iteration storage IterationBranchScoreArray = [] for TempBranch in BranchDescriptions: IterationBranchScoreArray.append(0) SequenceIDArray = [] #Top level (double) bootstrapping for DoubleBootIndex in range(0,Group1AlignmentCount): if DoSingleBoot == 1: SequenceIDArray.append(str(random.randrange(0,Group1AlignmentCount,1))) else: SequenceIDArray.append(str(DoubleBootIndex)) #Get new a total sequence length WeightedLength1 = 0 for SequenceID in SequenceIDArray: WeightedLength1 += Group1AlignLength[int(SequenceID)] for DoubleBootIndex in range(0,Group1AlignmentCount): SequenceID = random.randrange(0,Group1AlignmentCount,1) SequenceLength = Group1AlignLength[SequenceID] Sequence = Group1Alignments[SequenceID] for SequenceID in SequenceIDArray: SequenceID = int(SequenceID) SequenceLength = Group1AlignLength[SequenceID] Sequence = Group1Alignments[SequenceID] SuccessfulStrap = 0 while SuccessfulStrap == 0 and TimesFailed <= 100: AlphaSubstWork.WriteDBSAlignment(SequenceLength,SequenceCount,Sequence,UserRandomKey,DoSingleBoot) 
AlphaSubstWork.RunBaseML(BaseMLLocation,UserRandomKey,GalaxyLocation) SuccessfulStrap = int(AlphaSubstWork.ScoreBaseML(BaseMLLocation,UserRandomKey,BranchDescriptions,GalaxyLocation,0,ExtraBaseML,SubstModel)) if SuccessfulStrap != 0: BranchScores = AlphaSaveData.Get_DBS_Scores(AlphaSubstWork.BaseMLScores,AlphaSubstWork.BaseMLBranchDesc,1,SequenceLength,WeightedLength1) for BranchSubIndex in range (0,len(BranchScores)): IterationBranchScoreArray[BranchSubIndex] += BranchScores[BranchSubIndex] else: TimesFailed += 1 if TimesFailed > 100: stop_err("Maximum chances expended. Please inspect your sequences.") #Save the data AlphaSaveData.Save_DBS_Scores(IterationBranchScoreArray,AlphaSubstWork.BaseMLBranchDesc,1,1) if TimesFailed > 100: stop_err("Maximum chances expended. Please inspect your sequences.") #Reporting Results = AlphaSaveData.CalcStatScores(Iterations,0,DoSingleBoot,1,DoDoubleBoot,Sequences1,"",SubstModel,GetSE,ExtraBaseML,TotalSequenceLength1,0,"BaseML", Output_Format) #create output of = open(OutputFile,'w') print >>of,Results #Debugging Statistics #Stat_Results = "DoubleBoot = " + str(DoDoubleBoot) + "<BR>" #Stat_Results += "Iterations = " + str(Iterations) + "<BR>" #Stat_Results += "Alignments = " + str(Group1AlignmentCount) + "<BR><BR>" #Stat_Results += "Keys = " #for KeyIndex in range(0,len(AlphaSaveData.NameScoreDictionary)): # Stat_Results += str(AlphaSaveData.NameScoreDictionary[KeyIndex]) + " " #Stat_Results += "<BR><BR>" #for Index in range(0,len(Stat_Results_Sample)): # Stat_Results += "Run #" + str(Index + 1) + ": " # for SampleIndex in range(0,len(Stat_Results_Sample)): # Stat_Results += str(Stat_Results_Sample[SampleIndex]) + " " # Stat_Results += "<BR>Branch Scores " # for SampleIndex in range(0,len(Stat_Results_Iteration)): # Stat_Results += str(Stat_Results_Iteration[SampleIndex]) + " " #Stat_Results += "<BR><BR>" #of2 = open(Stat_Results_Outfile,'w') #print >>of2,Stat_Results #Clean up data 
AlphaSubstWork.FinalCleanUp(BaseMLLocation,GalaxyLocation,UserRandomKey)
jmchilton/galaxy-central
tools/mdea/BaseML.py
Python
mit
8,690
[ "Galaxy" ]
eb4591969bba5656ccd95b5a6d8e32d600d946a41972c1eda3001d47c920f3cb
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function, division import numpy as np import copy from scipy.spatial.distance import cdist from pyscf.nao.log_mesh import log_mesh # # # class ao_log(log_mesh): ''' Holder of radial orbitals on logarithmic grid. Args: ions : list of ion structures (read from ion files from siesta) or gto : gaussian type of orbitals object from pySCF or gpaw : ?? Returns: ao_log: sp2ion (ion structure from m_siesta_ion or m_siesta_ion_xml): List of structure composed of several field read from the ions file. 
nr (int): number of radial point rmin (float) kmax (float) rmax (float) rr pp psi_log psi_log_rl sp2rcut (array, float): array containing the rcutoff of each specie sp_mu2rcut (array, float) interp_rr instance of log_interp_c to interpolate along real-space axis interp_pp instance of log_interp_c to interpolate along momentum-space axis Examples: ''' def __init__(self, **kw): """ Initializes numerical orbitals """ log_mesh.__init__(self, **kw) if 'ao_log' in kw : # this is creating a deepcopy of all attributes ao = kw['ao_log'] for a in ao.__dict__.keys(): try: setattr(self, a, copy.deepcopy(getattr(ao, a))) except: pass elif 'gto' in kw: self.init_ao_log_gto(**kw) elif 'sp2ion' in kw: self.init_ao_log_ion(**kw) elif 'setups' in kw: self.init_ao_log_gpaw(**kw) elif 'xyz_list' in kw: pass else: print(kw.keys()) raise RuntimeError('unknown initialization method') def init_ao_log_gto(self, **kw): """ supposed to be private """ import numpy as np from pyscf.nao.m_log_interp import log_interp_c self.interp_rr,self.interp_pp = log_interp_c(self.rr), log_interp_c(self.pp) gto,rcut_tol = kw['gto'],self.rcut_tol a2s = [gto.atom_symbol(ia) for ia in range(gto.natm) ] self.sp2symbol = sorted(list(set(a2s))) nspecies = self.nspecies = len(self.sp2symbol) atom2sp = np.array([self.sp2symbol.index(s) for s in a2s], dtype=np.int64) self.sp2charge = np.array([-999]*self.nspecies, dtype=np.int64) for ia,sp in enumerate(atom2sp): self.sp2charge[sp]=gto.atom_charge(ia) self.sp_mu2j = [0]*nspecies self.psi_log = [0]*nspecies self.psi_log_rl = [0]*nspecies self.sp2nmult = np.zeros(nspecies, dtype=np.int64) seen_species = [] # this is auxiliary to organize the loop over species for ia,sp in enumerate(atom2sp): if sp in seen_species: continue seen_species.append(sp) self.sp2nmult[sp] = nmu = sum([gto.bas_nctr(sid) for sid in gto.atom_shell_ids(ia)]) mu2ff = np.zeros((nmu, self.nr)) mu2ff_rl = np.zeros((nmu, self.nr)) mu2j = np.zeros(nmu, dtype=np.int64) mu = -1 for sid in 
gto.atom_shell_ids(ia): pows, coeffss = gto.bas_exp(sid), gto.bas_ctr_coeff(sid) for coeffs in coeffss.T: mu=mu+1 l = mu2j[mu] = gto.bas_angular(sid) for ir, r in enumerate(self.rr): mu2ff_rl[mu,ir] = sum(pows[:]**((2*l+3)/4.0)*coeffs[:]*np.exp(-pows[:]*r**2)) mu2ff[mu,ir] = r**l*mu2ff_rl[mu,ir] self.sp_mu2j[sp] = mu2j norms = [np.sqrt(self.interp_rr.dg_jt*sum(ff**2*self.rr**3)) for ff in mu2ff] for mu,norm in enumerate(norms): mu2ff[mu,:] = mu2ff[mu,:]/norm mu2ff_rl[mu,:] = mu2ff_rl[mu,:]/norm self.psi_log[sp] = mu2ff self.psi_log_rl[sp] = mu2ff_rl self.jmx = max([mu2j.max() for mu2j in self.sp_mu2j]) self.sp_mu2s = [] for mu2j in self.sp_mu2j: mu2s = np.zeros(len(mu2j)+1, dtype=np.int64) for mu,j in enumerate(mu2j): mu2s[mu+1] = mu2s[mu]+2*j+1 self.sp_mu2s.append(mu2s) self.sp2norbs = np.array([mu2s[-1] for mu2s in self.sp_mu2s]) self.sp_mu2rcut = [] for sp, mu2ff in enumerate(self.psi_log): mu2rcut = np.zeros(len(mu2ff)) for mu,ff in enumerate(mu2ff): ffmx,irmx = abs(mu2ff[mu]).max(), abs(mu2ff[mu]).argmax() irrp = np.argmax(abs(ff[irmx:])<ffmx*rcut_tol) irrc = irmx+irrp if irrp>0 else -1 mu2rcut[mu] = self.rr[irrc] self.sp_mu2rcut.append(mu2rcut) self.sp2rcut = np.array([mu2rcut.max() for mu2rcut in self.sp_mu2rcut]) return self # # def init_ao_log_ion(self, **kw): """ Reads data from a previous SIESTA calculation, interpolates the Pseudo-Atomic Orbitals on a single log mesh. 
""" from pyscf.nao.m_log_interp import log_interp_c from pyscf.nao.m_spline_diff2 import spline_diff2 from pyscf.nao.m_spline_interp import spline_interp self.interp_rr,self.interp_pp = log_interp_c(self.rr), log_interp_c(self.pp) sp2ion = self.sp2ion = kw['sp2ion'] fname = kw['fname'] if 'fname' in kw else 'paos' if fname in sp2ion[0]: self.siesta_ion_interp(sp2ion, fname=fname) self.sp2vna = [None]*len(sp2ion) # Interpolate a Neutral-Atom potential V_NA(r) for each specie self.sp2rcut_vna = np.zeros(len(sp2ion)) for isp, ion in enumerate(sp2ion): if "vna" not in ion.keys(): continue if ion["vna"] is None: continue self.sp2rcut_vna[isp] = ion["vna"]["cutoff"] h,dat = ion["vna"]["delta"][0], ion["vna"]["data"][0][:,1] d2 = spline_diff2(h, dat, 0.0, 1.0e301) self.sp2vna[isp] = np.array([0.5*spline_interp(h, dat, d2, r) for r in self.rr]) # given in Rydberg in sp2ion self.sp2chlocal = [None]*len(sp2ion) # Interpolate the atomic charges for each specie self.sp2rcut_chlocal = np.zeros(len(sp2ion)) for isp, ion in enumerate(sp2ion): if "chlocal" not in ion.keys(): continue if ion["chlocal"] is None: continue self.sp2rcut_chlocal[isp] = ion["chlocal"]["cutoff"] h,dat = ion["chlocal"]["delta"][0], ion["chlocal"]["data"][0][:,1] d2 = spline_diff2(h, dat, 0.0, 1.0e301) self.sp2chlocal[isp] = np.array([spline_interp(h, dat, d2, r) for r in self.rr]) return self # # # def siesta_ion_interp(self, sp2ion, fname='paos'): from pyscf.nao.m_get_sp_mu2s import get_sp_mu2s from pyscf.nao.m_spline_diff2 import spline_diff2 from pyscf.nao.m_spline_interp import spline_interp """ Interpolation of orbitals or projectors given on linear grid in the ion dictionary rr : is the grid on which we want the function sp2ion : list of dictionaries fname : function name, can be 'paos' or 'kbs' """ rr, nr, nsp = self.rr, len(self.rr), len(sp2ion) pname = {'paos': 'orbital', 'kbs': 'projector'}[fname] self.nspecies = len(sp2ion) self.sp2nmult = np.zeros(self.nspecies, dtype='int64') self.sp_mu2rcut = 
[None]*self.nspecies self.sp_mu2j = [None]*self.nspecies self.sp_mu2s = [None]*self.nspecies self.sp2norbs = np.zeros(self.nspecies, dtype='int64') self.sp2rcut = np.zeros(self.nspecies) self.sp2charge = np.zeros(self.nspecies, dtype='int64') self.sp2valence = np.zeros(self.nspecies, dtype='int64') for isp, ion in enumerate(sp2ion): if ion[fname] is None: continue self.sp2nmult[isp] = len(ion[fname]['data']) self.sp_mu2rcut[isp] = np.array(ion[fname]["cutoff"]) self.sp_mu2j[isp] = np.array([o["l"] for o in ion[fname][pname]], dtype='int64') mu2s = np.zeros(self.sp2nmult[isp]+1, dtype='int64') for mu in range(self.sp2nmult[isp]): mu2s[mu+1] = sum(2*self.sp_mu2j[isp][0:mu+1]+1) self.sp_mu2s[isp] = mu2s self.sp2norbs[isp] = self.sp_mu2s[isp][self.sp2nmult[isp]] self.sp2rcut[isp] = np.amax(self.sp_mu2rcut[isp]) self.sp2charge[isp] = int(self.sp2ion[isp]['z']) self.sp2valence[isp] = int(self.sp2ion[isp]['valence']) self.jmx = max([mu2j.max() for mu2j in self.sp_mu2j if mu2j is not None]) if fname=='kbs': self.sp_mu2vkb = [None]*self.nspecies for isp, ion in enumerate(sp2ion): if ion[fname] is None: continue self.sp_mu2vkb[isp] = np.array([0.5*p['ref_energy'] for p in ion['kbs']['projector'] ]) self.psi_log_rl = [None]*self.nspecies for isp, ion in enumerate(sp2ion): if ion[fname] is None: continue ff = np.zeros((len(ion[fname][pname]), nr)) for mu,(h,dat) in enumerate(zip(ion[fname]["delta"],ion[fname]["data"])): diff2 = spline_diff2(h, dat[:,1], 0.0, 1.0e301) for i, r in enumerate(rr): ff[mu,i] = spline_interp(h, dat[:,1], diff2, r) self.psi_log_rl[isp] = ff self.psi_log = [None]*self.nspecies for isp, (mu2ff, mu2j) in enumerate(zip(self.psi_log_rl, self.sp_mu2j)): if mu2ff is None: continue gg = np.zeros((len(mu2j), nr)) for mu,(ff,j) in enumerate(zip(mu2ff,mu2j)): gg[mu] = ff*(rr**j) self.psi_log[isp] = gg def init_ao_log_gpaw(self, **kw): """ Reads radial orbitals from a previous GPAW calculation. 
""" from pyscf.nao.m_log_interp import log_interp_c #self.setups = setups if setup is saved in ao_log, we get the following error # while performing copy # File "/home/marc/anaconda2/lib/python2.7/copy.py", line 182, in deepcopy # rv = reductor(2) # TypeError: can't pickle Spline objects self.interp_rr,self.interp_pp = log_interp_c(self.rr), log_interp_c(self.pp) setups = kw['setups'] sdic = setups.setups self.sp2key = sdic.keys() #key0 = sdic.keys()[0] #print(key0, sdic[key0].Z, dir(sdic[key0])) self.sp_mu2j = [np.array(sdic[key].l_orb_j, np.int64) for key in sdic.keys()] self.sp2nmult = np.array([len(mu2j) for mu2j in self.sp_mu2j], dtype=np.int64) self.sp2charge = np.array([sdic[key].Z for key in sdic.keys()], dtype=np.int64) self.nspecies = len(self.sp_mu2j) self.jmx = max([max(mu2j) for mu2j in self.sp_mu2j]) self.sp2norbs = np.array([sum(2*mu2j+1) for mu2j in self.sp_mu2j], dtype=np.int64) self.sp_mu2rcut = [] self.psi_log_rl = [] self.psi_log = [] for sp,[key,nmu,mu2j] in enumerate(zip(sdic.keys(), self.sp2nmult, self.sp_mu2j)): self.sp_mu2rcut.append(np.array([phit.get_cutoff() for phit in sdic[key].phit_j])) mu2ff = np.zeros([nmu, self.nr]) for mu,phit in enumerate(sdic[key].phit_j): for ir, r in enumerate(self.rr): mu2ff[mu,ir],deriv = phit.get_value_and_derivative(r) self.psi_log_rl.append(mu2ff) self.psi_log.append(mu2ff* (self.rr**mu2j[mu])) self.sp2rcut = np.array([np.amax(rcuts) for rcuts in self.sp_mu2rcut], dtype='float64') # derived from sp_mu2rcut self.sp_mu2s = [] # derived from sp_mu2j for mu2j in self.sp_mu2j: mu2s = np.zeros(len(mu2j)+1, dtype=np.int64) for mu,j in enumerate(mu2j): mu2s[mu+1] = mu2s[mu]+2*j+1 self.sp_mu2s.append(mu2s) #self._add_sp2info() #self._add_psi_log_mom() #print(self.sp_mu2j) #print(self.sp2nmult) #print(self.nspecies) #print(self.jmx) #print(self.sp2norbs) #print(self.sp_mu2rcut) #print(self.psi_log) #print(self.sp2charge) #print(self.sp2rcut) #print(self.sp_mu2s) return self # def _add_sp2info(self): """ Adds a 
field sp2info containing, for each specie lists of integer charcteristics: """ self.sp2info = [] for sp,[mu2j,mu2s] in enumerate(zip(self.sp_mu2j,self.sp_mu2s)): self.sp2info.append([ [mu, j, mu2s[mu], mu2s[mu+1]] for mu,j in enumerate(mu2j)]) # def _add_psi_log_mom(self): """ Adds a field psi_log_mom which contains Bessel transforms of original radial functions (from psi_log) """ import numpy as np from pyscf.nao.m_sbt import sbt_c sbt = sbt_c(self.rr, self.pp, lmax=self.jmx) self.psi_log_mom = [] for sp,[nmu,mu2ff,mu2j] in enumerate(zip(self.sp2nmult, self.psi_log, self.sp_mu2j)): mu2ao = np.zeros((nmu,self.nr), dtype='float64') for mu,[am,ff] in enumerate(zip(mu2j,mu2ff)): mu2ao[mu,:] = sbt.sbt( ff, am, 1 ) self.psi_log_mom.append(mu2ao) del sbt # def view(self): """ Shows a plot of all radial orbitals """ import matplotlib.pyplot as plt for sp in range(self.nspecies): plt.figure(sp+1) plt.title('Orbitals for specie='+ str(sp)+' Znuc='+str(self.sp2charge[sp])) for j,ff in zip(self.sp_mu2j[sp], self.psi_log[sp]): if j>0 : plt.plot(self.rr, ff, '--', label=str(j)) else: plt.plot(self.rr, ff, '-', label=str(j)) #plt.xlim([0.0,3.0]) plt.legend() plt.show() def comp_moments(self): """ Computes the scalar and dipole moments of the product functions Args: argument can be prod_log or ao_log """ rr3dr = self.rr**3*np.log(self.rr[1]/self.rr[0]) rr4dr = self.rr*rr3dr sp2mom0,sp2mom1,cs,cd = [],[],np.sqrt(4*np.pi),np.sqrt(4*np.pi/3.0) for sp,nmu in enumerate(self.sp2nmult): nfunct=sum(2*self.sp_mu2j[sp]+1) mom0 = np.zeros((nfunct)) d = np.zeros((nfunct,3)) for mu,[j,s] in enumerate(zip(self.sp_mu2j[sp],self.sp_mu2s[sp])): if j==0: mom0[s] = cs*sum(self.psi_log[sp][mu,:]*rr3dr) if j==1: d[s,1]=d[s+1,2]=d[s+2,0] = cd*sum(self.psi_log[sp][mu,:]*rr4dr) sp2mom0.append(mom0) sp2mom1.append(d) return sp2mom0,sp2mom1 def get_aoneo(self): """Packs the data into one array for a later transfer to the library """ import numpy as np from numpy import require, float64, concatenate as 
conc nr = self.nr nsp = self.nspecies nmt = sum(self.sp2nmult) nrt = nr*nmt nms = nmt+nsp nsvn = 200 + 2*nr + 4*nsp + 2*nmt + nrt + nms svn = require(np.zeros(nsvn), dtype=float64, requirements='CW') # Simple parameters i = 0 svn[i] = nsp; i+=1; svn[i] = nr; i+=1; svn[i] = self.rmin; i+=1; svn[i] = self.rmax; i+=1; svn[i] = self.kmax; i+=1; svn[i] = self.jmx; i+=1; svn[i] = conc(self.psi_log).sum(); i+=1; # Pointers to data i = 99 s = 199 svn[i] = s+1; i+=1; f=s+nr; svn[s:f] = self.rr; s=f; # pointer to rr svn[i] = s+1; i+=1; f=s+nr; svn[s:f] = self.pp; s=f; # pointer to pp svn[i] = s+1; i+=1; f=s+nsp; svn[s:f] = self.sp2nmult; s=f; # pointer to sp2nmult svn[i] = s+1; i+=1; f=s+nsp; svn[s:f] = self.sp2rcut; s=f; # pointer to sp2rcut svn[i] = s+1; i+=1; f=s+nsp; svn[s:f] = self.sp2norbs; s=f; # pointer to sp2norbs svn[i] = s+1; i+=1; f=s+nsp; svn[s:f] = self.sp2charge; s=f; # pointer to sp2charge svn[i] = s+1; i+=1; f=s+nmt; svn[s:f] = conc(self.sp_mu2j); s=f; # pointer to sp_mu2j svn[i] = s+1; i+=1; f=s+nmt; svn[s:f] = conc(self.sp_mu2rcut); s=f; # pointer to sp_mu2rcut svn[i] = s+1; i+=1; f=s+nrt; svn[s:f] = conc(self.psi_log).reshape(nrt); s=f; # pointer to psi_log svn[i] = s+1; i+=1; f=s+nms; svn[s:f] = conc(self.sp_mu2s); s=f; # pointer to sp_mu2s svn[i] = s+1; # this is a terminator to simple operation return svn # # # def ao_eval(self, ra, isp, coords): from pyscf.nao.m_rsphar_libnao import rsphar """ Compute the values of atomic orbitals on given grid points Args: ao : instance of ao_log_c class ra : vector where the atomic orbitals from "ao" are centered isp : specie index for which we compute coords: coordinates on which we compute Returns: res[norbs,ncoord] : array of atomic orbital values """ rrs = cdist(ra.reshape((1,3)), coords).reshape(-1) rcutmx = self.sp2rcut[isp] mu_c2pao = self.interp_rr.interp_csr(self.psi_log_rl[isp], rrs, rcut=rcutmx) #print(__name__, mu_c2pao.shape) res = np.zeros((self.sp2norbs[isp],coords.shape[0])) jmx_sp = 
self.sp_mu2j[isp].max() rsh = np.zeros((jmx_sp+1)**2) for icrd,(coord,r) in enumerate(zip(coords-ra, rrs)): if rrs[icrd]>rcutmx: continue rsphar(coord, jmx_sp, rsh) for mu,(j,s,f) in enumerate(zip(self.sp_mu2j[isp],self.sp_mu2s[isp],self.sp_mu2s[isp][1:])): fval = mu_c2pao[mu,icrd] if j==0 else mu_c2pao[mu,icrd] * r**j res[s:f,icrd] = fval * rsh[j*(j+1)-j:j*(j+1)+j+1] return res # # # if __name__=="__main__": from pyscf import gto from pyscf.nao.m_ao_log import ao_log_c """ Interpreting small Gaussian calculation """ mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz') # coordinates in Angstrom! ao = ao_log(gto=mol) print(ao.sp2norbs)
gkc1000/pyscf
pyscf/nao/ao_log.py
Python
apache-2.0
18,300
[ "GPAW", "Gaussian", "PySCF", "SIESTA" ]
898d6d779b76bf175dae643315dbeb8da6a80cb2bd43d517542ae3ff7fbf8c4d
"""Utility routines depending on the finder, a combination of code by Jack Jansen and erik@letterror.com. Most events have been captured from Lasso Capture AE and than translated to python code. IMPORTANT Note that the processes() function returns different values depending on the OS version it is running on. On MacOS 9 the Finder returns the process *names* which can then be used to find out more about them. On MacOS 8.6 and earlier the Finder returns a code which does not seem to work. So bottom line: the processes() stuff does not work on < MacOS9 Mostly written by erik@letterror.com """ import Finder from Carbon import AppleEvents import aetools import MacOS import sys import Carbon.File import Carbon.Folder import aetypes from types import * __version__ = '1.1' Error = 'findertools.Error' _finder_talker = None def _getfinder(): """returns basic (recyclable) Finder AE interface object""" global _finder_talker if not _finder_talker: _finder_talker = Finder.Finder() _finder_talker.send_flags = ( _finder_talker.send_flags | AppleEvents.kAECanInteract | AppleEvents.kAECanSwitchLayer) return _finder_talker def launch(file): """Open a file thru the finder. Specify file by name or fsspec""" finder = _getfinder() fss = Carbon.File.FSSpec(file) return finder.open(fss) def Print(file): """Print a file thru the finder. 
Specify file by name or fsspec""" finder = _getfinder() fss = Carbon.File.FSSpec(file) return finder._print(fss) def copy(src, dstdir): """Copy a file to a folder""" finder = _getfinder() if type(src) == type([]): src_fss = [] for s in src: src_fss.append(Carbon.File.FSSpec(s)) else: src_fss = Carbon.File.FSSpec(src) dst_fss = Carbon.File.FSSpec(dstdir) return finder.duplicate(src_fss, to=dst_fss) def move(src, dstdir): """Move a file to a folder""" finder = _getfinder() if type(src) == type([]): src_fss = [] for s in src: src_fss.append(Carbon.File.FSSpec(s)) else: src_fss = Carbon.File.FSSpec(src) dst_fss = Carbon.File.FSSpec(dstdir) return finder.move(src_fss, to=dst_fss) def sleep(): """Put the mac to sleep""" finder = _getfinder() finder.sleep() def shutdown(): """Shut the mac down""" finder = _getfinder() finder.shut_down() def restart(): """Restart the mac""" finder = _getfinder() finder.restart() #--------------------------------------------------- # Additional findertools # def reveal(file): """Reveal a file in the finder. Specify file by name, fsref or fsspec.""" finder = _getfinder() fsr = Carbon.File.FSRef(file) file_alias = fsr.FSNewAliasMinimal() return finder.reveal(file_alias) def select(file): """select a file in the finder. Specify file by name, fsref or fsspec.""" finder = _getfinder() fsr = Carbon.File.FSRef(file) file_alias = fsr.FSNewAliasMinimal() return finder.select(file_alias) def update(file): """Update the display of the specified object(s) to match their on-disk representation. 
Specify file by name, fsref or fsspec.""" finder = _getfinder() fsr = Carbon.File.FSRef(file) file_alias = fsr.FSNewAliasMinimal() return finder.update(file_alias) #--------------------------------------------------- # More findertools # def comment(object, comment=None): """comment: get or set the Finder-comment of the item, displayed in the 'Get Info' window.""" object = Carbon.File.FSRef(object) object_alias = object.FSNewAliasMonimal() if comment == None: return _getcomment(object_alias) else: return _setcomment(object_alias, comment) def _setcomment(object_alias, comment): finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'), form="alis", seld=object_alias, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('comt'), fr=aeobj_00) args['----'] = aeobj_01 args["data"] = comment _reply, args, attrs = finder.send("core", "setd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def _getcomment(object_alias): finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'), form="alis", seld=object_alias, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('comt'), fr=aeobj_00) args['----'] = aeobj_01 _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] #--------------------------------------------------- # Get information about current processes in the Finder. 
def processes(): """processes returns a list of all active processes running on this computer and their creators.""" finder = _getfinder() args = {} attrs = {} processnames = [] processnumbers = [] creators = [] partitions = [] used = [] ## get the processnames or else the processnumbers args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prcs'), form="indx", seld=aetypes.Unknown('abso', "all "), fr=None) _reply, args, attrs = finder.send('core', 'getd', args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) p = [] if args.has_key('----'): p = args['----'] for proc in p: if hasattr(proc, 'seld'): # it has a real name processnames.append(proc.seld) elif hasattr(proc, 'type'): if proc.type == "psn ": # it has a process number processnumbers.append(proc.data) ## get the creators args = {} attrs = {} aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('prcs'), form="indx", seld=aetypes.Unknown('abso', "all "), fr=None) args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('fcrt'), fr=aeobj_0) _reply, args, attrs = finder.send('core', 'getd', args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(_arg) if args.has_key('----'): p = args['----'] creators = p[:] ## concatenate in one dict result = [] if len(processnames) > len(processnumbers): data = processnames else: data = processnumbers for i in range(len(creators)): result.append((data[i], creators[i])) return result class _process: pass def isactiveprocess(processname): """Check of processname is active. MacOS9""" all = processes() ok = 0 for n, c in all: if n == processname: return 1 return 0 def processinfo(processname): """Return an object with all process properties as attributes for processname. 
MacOS9""" p = _process() if processname == "Finder": p.partition = None p.used = None else: p.partition = _processproperty(processname, 'appt') p.used = _processproperty(processname, 'pusd') p.visible = _processproperty(processname, 'pvis') #Is the process' layer visible? p.frontmost = _processproperty(processname, 'pisf') #Is the process the frontmost process? p.file = _processproperty(processname, 'file') #the file from which the process was launched p.filetype = _processproperty(processname, 'asty') #the OSType of the file type of the process p.creatortype = _processproperty(processname, 'fcrt') #the OSType of the creator of the process (the signature) p.accepthighlevel = _processproperty(processname, 'revt') #Is the process high-level event aware (accepts open application, open document, print document, and quit)? p.hasscripting = _processproperty(processname, 'hscr') #Does the process have a scripting terminology, i.e., can it be scripted? return p def _processproperty(processname, property): """return the partition size and memory used for processname""" finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('prcs'), form="name", seld=processname, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type(property), fr=aeobj_00) args['----'] = aeobj_01 _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] #--------------------------------------------------- # Mess around with Finder windows. 
def openwindow(object): """Open a Finder window for object, Specify object by name or fsspec.""" finder = _getfinder() object = Carbon.File.FSRef(object) object_alias = object.FSNewAliasMinimal() args = {} attrs = {} _code = 'aevt' _subcode = 'odoc' aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=object_alias, fr=None) args['----'] = aeobj_0 _reply, args, attrs = finder.send(_code, _subcode, args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) def closewindow(object): """Close a Finder window for folder, Specify by path.""" finder = _getfinder() object = Carbon.File.FSRef(object) object_alias = object.FSNewAliasMinimal() args = {} attrs = {} _code = 'core' _subcode = 'clos' aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=object_alias, fr=None) args['----'] = aeobj_0 _reply, args, attrs = finder.send(_code, _subcode, args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) def location(object, pos=None): """Set the position of a Finder window for folder to pos=(w, h). Specify file by name or fsspec. 
If pos=None, location will return the current position of the object.""" object = Carbon.File.FSRef(object) object_alias = object.FSNewAliasMinimal() if not pos: return _getlocation(object_alias) return _setlocation(object_alias, pos) def _setlocation(object_alias, (x, y)): """_setlocation: Set the location of the icon for the object.""" finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=object_alias, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('posn'), fr=aeobj_00) args['----'] = aeobj_01 args["data"] = [x, y] _reply, args, attrs = finder.send("core", "setd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) return (x,y) def _getlocation(object_alias): """_getlocation: get the location of the icon for the object.""" finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=object_alias, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('posn'), fr=aeobj_00) args['----'] = aeobj_01 _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): pos = args['----'] return pos.h, pos.v def label(object, index=None): """label: set or get the label of the item. 
Specify file by name or fsspec.""" object = Carbon.File.FSRef(object) object_alias = object.FSNewAliasMinimal() if index == None: return _getlabel(object_alias) if index < 0 or index > 7: index = 0 return _setlabel(object_alias, index) def _getlabel(object_alias): """label: Get the label for the object.""" finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'), form="alis", seld=object_alias, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('labi'), fr=aeobj_00) args['----'] = aeobj_01 _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def _setlabel(object_alias, index): """label: Set the label for the object.""" finder = _getfinder() args = {} attrs = {} _code = 'core' _subcode = 'setd' aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="alis", seld=object_alias, fr=None) aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('labi'), fr=aeobj_0) args['----'] = aeobj_1 args["data"] = index _reply, args, attrs = finder.send(_code, _subcode, args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) return index def windowview(folder, view=None): """windowview: Set the view of the window for the folder. Specify file by name or fsspec. 
0 = by icon (default) 1 = by name 2 = by button """ fsr = Carbon.File.FSRef(folder) folder_alias = fsr.FSNewAliasMinimal() if view == None: return _getwindowview(folder_alias) return _setwindowview(folder_alias, view) def _setwindowview(folder_alias, view=0): """set the windowview""" attrs = {} args = {} if view == 1: _v = aetypes.Type('pnam') elif view == 2: _v = aetypes.Type('lgbu') else: _v = aetypes.Type('iimg') finder = _getfinder() aeobj_0 = aetypes.ObjectSpecifier(want = aetypes.Type('cfol'), form = 'alis', seld = folder_alias, fr=None) aeobj_1 = aetypes.ObjectSpecifier(want = aetypes.Type('prop'), form = 'prop', seld = aetypes.Type('cwnd'), fr=aeobj_0) aeobj_2 = aetypes.ObjectSpecifier(want = aetypes.Type('prop'), form = 'prop', seld = aetypes.Type('pvew'), fr=aeobj_1) aeobj_3 = aetypes.ObjectSpecifier(want = aetypes.Type('prop'), form = 'prop', seld = _v, fr=None) _code = 'core' _subcode = 'setd' args['----'] = aeobj_2 args['data'] = aeobj_3 _reply, args, attrs = finder.send(_code, _subcode, args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def _getwindowview(folder_alias): """get the windowview""" attrs = {} args = {} finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=folder_alias, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_00) aeobj_02 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('pvew'), fr=aeobj_01) args['----'] = aeobj_02 _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) views = {'iimg':0, 'pnam':1, 'lgbu':2} if args.has_key('----'): return views[args['----'].enum] def windowsize(folder, size=None): """Set the size of a Finder window for folder to size=(w, h), Specify by path. 
If size=None, windowsize will return the current size of the window. Specify file by name or fsspec. """ fsr = Carbon.File.FSRef(folder) folder_alias = fsr.FSNewAliasMinimal() openwindow(fsr) if not size: return _getwindowsize(folder_alias) return _setwindowsize(folder_alias, size) def _setwindowsize(folder_alias, (w, h)): """Set the size of a Finder window for folder to (w, h)""" finder = _getfinder() args = {} attrs = {} _code = 'core' _subcode = 'setd' aevar00 = [w, h] aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=folder_alias, fr=None) aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0) aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('ptsz'), fr=aeobj_1) args['----'] = aeobj_2 args["data"] = aevar00 _reply, args, attrs = finder.send(_code, _subcode, args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) return (w, h) def _getwindowsize(folder_alias): """Set the size of a Finder window for folder to (w, h)""" finder = _getfinder() args = {} attrs = {} aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=folder_alias, fr=None) aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0) aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('posn'), fr=aeobj_1) args['----'] = aeobj_2 _reply, args, attrs = finder.send('core', 'getd', args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def windowposition(folder, pos=None): """Set the position of a Finder window for folder to pos=(w, h).""" fsr = Carbon.File.FSRef(folder) folder_alias = fsr.FSNewAliasMinimal() openwindow(fsr) if not pos: return _getwindowposition(folder_alias) if type(pos) == InstanceType: # pos might be a QDPoint object as returned by _getwindowposition pos = 
(pos.h, pos.v) return _setwindowposition(folder_alias, pos) def _setwindowposition(folder_alias, (x, y)): """Set the size of a Finder window for folder to (w, h).""" finder = _getfinder() args = {} attrs = {} aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=folder_alias, fr=None) aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0) aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('posn'), fr=aeobj_1) args['----'] = aeobj_2 args["data"] = [x, y] _reply, args, attrs = finder.send('core', 'setd', args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def _getwindowposition(folder_alias): """Get the size of a Finder window for folder, Specify by path.""" finder = _getfinder() args = {} attrs = {} aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=folder_alias, fr=None) aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0) aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('ptsz'), fr=aeobj_1) args['----'] = aeobj_2 _reply, args, attrs = finder.send('core', 'getd', args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def icon(object, icondata=None): """icon sets the icon of object, if no icondata is given, icon will return an AE object with binary data for the current icon. If left untouched, this data can be used to paste the icon on another file. Development opportunity: get and set the data as PICT.""" fsr = Carbon.File.FSRef(object) object_alias = fsr.FSNewAliasMinimal() if icondata == None: return _geticon(object_alias) return _seticon(object_alias, icondata) def _geticon(object_alias): """get the icondata for object. 
Binary data of some sort.""" finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'), form="alis", seld=object_alias, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('iimg'), fr=aeobj_00) args['----'] = aeobj_01 _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def _seticon(object_alias, icondata): """set the icondata for object, formatted as produced by _geticon()""" finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'), form="alis", seld=object_alias, fr=None) aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('iimg'), fr=aeobj_00) args['----'] = aeobj_01 args["data"] = icondata _reply, args, attrs = finder.send("core", "setd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'].data #--------------------------------------------------- # Volumes and servers. def mountvolume(volume, server=None, username=None, password=None): """mount a volume, local or on a server on AppleTalk. Note: mounting a ASIP server requires a different operation. 
server is the name of the server where the volume belongs username, password belong to a registered user of the volume.""" finder = _getfinder() args = {} attrs = {} if password: args["PASS"] = password if username: args["USER"] = username if server: args["SRVR"] = server args['----'] = volume _reply, args, attrs = finder.send("aevt", "mvol", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def unmountvolume(volume): """unmount a volume that's on the desktop""" putaway(volume) def putaway(object): """puth the object away, whereever it came from.""" finder = _getfinder() args = {} attrs = {} args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('cdis'), form="name", seld=object, fr=None) _reply, args, attrs = talker.send("fndr", "ptwy", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] #--------------------------------------------------- # Miscellaneous functions # def volumelevel(level): """set the audio output level, parameter between 0 (silent) and 7 (full blast)""" finder = _getfinder() args = {} attrs = {} if level < 0: level = 0 elif level > 7: level = 7 args['----'] = level _reply, args, attrs = finder.send("aevt", "stvl", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def OSversion(): """return the version of the system software""" finder = _getfinder() args = {} attrs = {} aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('ver2'), fr=None) args['----'] = aeobj_00 _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): return args['----'] def filesharing(): """return the current status of filesharing and whether it is starting up or not: -1 file sharing is off and not starting up 0 file sharing is off 
and starting up 1 file sharing is on""" status = -1 finder = _getfinder() # see if it is on args = {} attrs = {} args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('fshr'), fr=None) _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): if args['----'] == 0: status = -1 else: status = 1 # is it starting up perchance? args = {} attrs = {} args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('fsup'), fr=None) _reply, args, attrs = finder.send("core", "getd", args, attrs) if args.has_key('errn'): raise Error, aetools.decodeerror(args) if args.has_key('----'): if args['----'] == 1: status = 0 return status def movetotrash(path): """move the object to the trash""" fss = Carbon.File.FSSpec(path) trashfolder = Carbon.Folder.FSFindFolder(fss.as_tuple()[0], 'trsh', 0) move(path, trashfolder) def emptytrash(): """empty the trash""" finder = _getfinder() args = {} attrs = {} args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('trsh'), fr=None) _reply, args, attrs = finder.send("fndr", "empt", args, attrs) if args.has_key('errn'): raise aetools.Error, aetools.decodeerror(args) def _test(): import EasyDialogs print 'Original findertools functionality test...' print 'Testing launch...' pathname = EasyDialogs.AskFileForOpen('File to launch:') if pathname: result = launch(pathname) if result: print 'Result: ', result print 'Press return-', sys.stdin.readline() print 'Testing print...' pathname = EasyDialogs.AskFileForOpen('File to print:') if pathname: result = Print(pathname) if result: print 'Result: ', result print 'Press return-', sys.stdin.readline() print 'Testing copy...' 
pathname = EasyDialogs.AskFileForOpen('File to copy:') if pathname: destdir = EasyDialogs.AskFolder('Destination:') if destdir: result = copy(pathname, destdir) if result: print 'Result:', result print 'Press return-', sys.stdin.readline() print 'Testing move...' pathname = EasyDialogs.AskFileForOpen('File to move:') if pathname: destdir = EasyDialogs.AskFolder('Destination:') if destdir: result = move(pathname, destdir) if result: print 'Result:', result print 'Press return-', sys.stdin.readline() print 'Testing sleep...' if EasyDialogs.AskYesNoCancel('Sleep?') > 0: result = sleep() if result: print 'Result:', result print 'Press return-', sys.stdin.readline() print 'Testing shutdown...' if EasyDialogs.AskYesNoCancel('Shut down?') > 0: result = shutdown() if result: print 'Result:', result print 'Press return-', sys.stdin.readline() print 'Testing restart...' if EasyDialogs.AskYesNoCancel('Restart?') > 0: result = restart() if result: print 'Result:', result print 'Press return-', sys.stdin.readline() def _test2(): print '\nmorefindertools version %s\nTests coming up...' %__version__ import os import random # miscellaneous print '\tfilesharing on?', filesharing() # is file sharing on, off, starting up? print '\tOS version', OSversion() # the version of the system software # set the soundvolume in a simple way print '\tSystem beep volume' for i in range(0, 7): volumelevel(i) MacOS.SysBeep() # Finder's windows, file location, file attributes open("@findertoolstest", "w") f = "@findertoolstest" reveal(f) # reveal this file in a Finder window select(f) # select this file base, file = os.path.split(f) closewindow(base) # close the window this file is in (opened by reveal) openwindow(base) # open it again windowview(base, 1) # set the view by list label(f, 2) # set the label of this file to something orange print '\tlabel', label(f) # get the label of this file # the file location only works in a window with icon view! 
print 'Random locations for an icon' windowview(base, 0) # set the view by icon windowsize(base, (600, 600)) for i in range(50): location(f, (random.randint(10, 590), random.randint(10, 590))) windowsize(base, (200, 400)) windowview(base, 1) # set the view by icon orgpos = windowposition(base) print 'Animated window location' for i in range(10): pos = (100+i*10, 100+i*10) windowposition(base, pos) print '\twindow position', pos windowposition(base, orgpos) # park it where it was before print 'Put a comment in file', f, ':' print '\t', comment(f) # print the Finder comment this file has s = 'This is a comment no one reads!' comment(f, s) # set the Finder comment def _test3(): print 'MacOS9 or better specific functions' # processes pr = processes() # return a list of tuples with (active_processname, creatorcode) print 'Return a list of current active processes:' for p in pr: print '\t', p # get attributes of the first process in the list print 'Attributes of the first process in the list:' pinfo = processinfo(pr[0][0]) print '\t', pr[0][0] print '\t\tmemory partition', pinfo.partition # the memory allocated to this process print '\t\tmemory used', pinfo.used # the memory actuall used by this process print '\t\tis visible', pinfo.visible # is the process visible to the user print '\t\tis frontmost', pinfo.frontmost # is the process the front most one? print '\t\thas scripting', pinfo.hasscripting # is the process scriptable? print '\t\taccepts high level events', pinfo.accepthighlevel # does the process accept high level appleevents? if __name__ == '__main__': _test() _test2() _test3()
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/findertools.py
Python
mit
30,503
[ "BLAST" ]
d3793bd588c12ec578e81c979ec8c3844e910263d95e0ea4b6a8c956f904b923
# ================================================================================================== # Copyright 2011 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== __author__ = 'Brian Wickman' import logging from twitter.common.log.initialize import ( init, teardown_disk_logging, teardown_stderr_logging) try: from twitter.common import app from twitter.common.log.options import LogOptions class LoggingSubsystem(app.Module): def __init__(self): app.Module.__init__(self, __name__, description="Logging subsystem.") def setup_function(self): if not LogOptions._is_disk_logging_required(): init() else: init(app.name()) app.register_module(LoggingSubsystem()) except ImportError: # Do not require twitter.common.app pass debug = logging.debug info = logging.info warning = logging.warning warn = logging.warning error = logging.error fatal = logging.fatal log = logging.log logger = logging.getLogger DEBUG = logging.DEBUG INFO = logging.INFO WARNING = logging.WARNING WARN = logging.WARN ERROR = logging.ERROR FATAL = logging.FATAL __all__ = [ # directives 'debug', 'info', 'warning', 'warn', # alias 'error', 'fatal', 'log', 'logger', # levels 'DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'FATAL', # only if you're not using app directly. 
'init', 'teardown_stderr_logging', 'teardown_disk_logging', # ditto 'formatters' ]
imsut/commons
src/python/twitter/common/log/__init__.py
Python
apache-2.0
2,176
[ "Brian" ]
928acb41e14fac221ba2c4a2185f881cba612ad56b2080ef321c46a9bfcba5b7
#!/usr/bin/env python # Python Module to work with ROMS # Owner: Rafael Soutelino - rsoutelino@gmail.com # Committers: Andre Lobato - andrefelipelobato@gmail.com # Last modification: Mar, 2016 ##################################################################### from dateutil.parser import parse import numpy as np import pylab as pl import matplotlib.pyplot as plt from matplotlib import delaunay from matplotlib.mlab import griddata import scipy.interpolate as spint from mpl_toolkits.basemap import Basemap import scipy.io as sp import datetime as dt import netCDF4 as nc import yaml ### CLASS RunSetup ################################################# def __version__(): return "romslab-0.2" class RunSetup(object): """ROMS domain config file""" def __init__(self, filename, imp): self.filename = filename self.id = imp f = yaml.load( open(filename) ) configs = f[imp] for key in configs.keys(): execstr = "self.%s = configs[key]" %key exec(execstr) self.lonmin, self.lonmax = self.lims[0], self.lims[1] self.latmin, self.latmax = self.lims[2], self.lims[3] self.hmaxc = self.hmin ### CLASS RomsGrid ################################################## class RomsGrid(object): """ Stores and manipulates netcdf ROMS grid file information """ def __init__(self,filename): self.filename = filename self.ncfile = nc.Dataset(filename, mode='r+') self.lonr = self.ncfile.variables['lon_rho'][:] self.latr = self.ncfile.variables['lat_rho'][:] self.lonu = self.ncfile.variables['lon_u'][:] self.latu = self.ncfile.variables['lat_u'][:] self.lonv = self.ncfile.variables['lon_v'][:] self.latv = self.ncfile.variables['lat_v'][:] self.h = self.ncfile.variables['h'][:] self.maskr = self.ncfile.variables['mask_rho'][:] self.masku = self.ncfile.variables['mask_u'][:] self.maskv = self.ncfile.variables['mask_v'][:] def corners(self): """ Returns lon, lat cornes for a map projection: Usage: llclon, urclon, llclat, urclat = corners(self) """ llclon = self.lonr.min() urclon = self.lonr.max() llclat = 
self.latr.min() urclat = self.latr.max() return llclon, urclon, llclat, urclat ### CLASS RomsHis ############################################## class RomsHis(object): """ Stores and manipulates netcdf ROMS history file information !!! Under construction !!! """ def __init__(self,filename): self.filename = filename self.ncfile = nc.Dataset(filename, mode='r') self.varlist = list(self.ncfile.variables) for var in self.varlist: exec("self.%s = self.ncfile.variables['%s']" %(var, var) ) # self.lonr = self.ncfile.variables['lon_rho'][:] # self.latr = self.ncfile.variables['lat_rho'][:] # self.lonu = self.ncfile.variables['lon_u'][:] # self.latu = self.ncfile.variables['lat_u'][:] # self.lonv = self.ncfile.variables['lon_v'][:] # self.latv = self.ncfile.variables['lat_v'][:] # self.maskr = self.ncfile.variables['mask_rho'][:] # self.masku = self.ncfile.variables['mask_u'][:] # self.maskv = self.ncfile.variables['mask_v'][:] def corners(self): """ Returns lon, lat cornes for a map projection: Usage: llclon, urclon, llclat, urclat = corners(self) """ llclon = self.lonr.min() urclon = self.lonr.max() llclat = self.latr.min() urclat = self.latr.max() return llclon, urclon, llclat, urclat ### CLASS M2_diagnostics ##################################################### class M2_diagnostics(object): """ Container class for the depth-averaged (2D) momentum equation diagostic terms for a ROMS run. USAGE ----- m2_terms = M2_diagnostics(diafile, verbose=False) diafile is a ROMS diagnostics file (*_dia.nc). This class extracts all the M2 diagnostic terms available in the file. Returns an object with a '.xi' and a '.eta' attribute. Those are dictionaries that store the <netCDF4.Variable> objects corresponding to each term. """ def __init__(self, diafile): dia = nc.Dataset(diafile) self.diafile = dia self.time = dia.variables['ocean_time'][:]/86400. # Model time in days. 
self._RUN_AVERAGED = False self._TIME_AVERAGED = False self.xi = dict() self.eta = dict() self.xi_labels = dict() self.eta_labels = dict() self.keys_xi = ['ut','uux','vuy','ucor','upgrd','uistr','usstr','ubstr'] self.keys_eta = ['vt','uvx','vvy','vcor','vpgrd','vistr','vsstr','vbstr'] vals_xi = ['ubar_accel','ubar_xadv','ubar_yadv','ubar_cor',\ 'ubar_prsgrd','ubar_hvisc','ubar_sstr','ubar_bstr'] vals_eta = ['vbar_accel','vbar_xadv','vbar_yadv','vbar_cor',\ 'vbar_prsgrd','vbar_hvisc','vbar_sstr','vbar_bstr'] ## Terms of the M2 balance in the XI-component. for key,val in zip(self.keys_xi,vals_xi): try: self.xi[key] = dia.variables[val] except KeyError: print "Warning: %s not found in diagnostics file."%val pass ## Terms of the M2 balance in the ETA-component. for key,val in zip(self.keys_eta,vals_eta): try: self.eta[key] = dia.variables[val] except KeyError: print "Warning: %s not found in diagnostics file."%val pass ## Move all fields to PSI-points. print "" print "Moving all terms to PSI-points." for term in self.xi.iterkeys(): self.xi[term] = 0.5*(self.xi[term][:,1:,:]+self.xi[term][:,:-1,:]) for term in self.eta.iterkeys(): self.eta[term] = 0.5*(self.eta[term][:,:,1:]+self.eta[term][:,:,:-1]) self.nt = dia.variables['ocean_time'].size self.x = self.diafile.variables['lon_psi'][:] self.y = self.diafile.variables['lat_psi'][:] self.nxy = self.x.shape ## Rotate all fields from (xi,eta) to (zonal,meridional) axes. print "" print "Rotating all terms to (estward,northward) coordinates." ang = self.diafile.variables['angle'][:] ang = 0.5*(ang[1:,:]+ang[:-1,:]) ang = 0.5*(ang[:,1:]+ang[:,:-1]) ## Rotation angle is from (xi,eta) to (eastward,northward) axes. 
ang = -ang for termx,termy in zip(self.keys_xi,self.keys_eta): try: termxi_tmp = self.xi[termx] print termx except KeyError: print "Warning: %s not available."%termx continue try: termeta_tmp = self.eta[termy] print termy except KeyError: print "Warning: %s not available."%termy continue ## Rotation angle is from (xi,eta) to (eastward,northward) axes. self.xi[termx] = + termxi_tmp*np.cos(ang) + termeta_tmp*np.sin(ang) self.eta[termy] = - termxi_tmp*np.sin(ang) + termeta_tmp*np.cos(ang) ## Labels of the terms of the M2 balance in the XI-component (in TeX code). self.xi_labels['ut'] = ur'$\bar{u}_t$' self.xi_labels['uux'] = ur'$\bar{u}\bar{u}_x$' self.xi_labels['vuy'] = ur'$\bar{v}\bar{u}_y$' self.xi_labels['ucor'] = ur'$-f\bar{v}$' self.xi_labels['upgrd'] = ur'$-p_x/\rho_0$' self.xi_labels['uistr'] = ur'$A_H$\nabla\bar{u}' self.xi_labels['usstr'] = ur'$\tau_s^x/(H\rho_0)$' self.xi_labels['ubstr'] = ur'$-\tau_b^x/(H\rho_0)$' ## Labels of the terms of the M2 balance in the ETA-component (in TeX code). self.eta_labels['vt'] = ur'$\bar{v}_t$' self.eta_labels['uvx'] = ur'$\bar{u}\bar{v}_x$' self.eta_labels['vvy'] = ur'$\bar{v}\bar{v}_y$' self.eta_labels['vcor'] = ur'$f\bar{u}$' self.eta_labels['vpgrd'] = ur'$-p_y/\rho_0$' self.eta_labels['vistr'] = ur'$A_H$\nabla\bar{v}' self.eta_labels['vsstr'] = ur'$\tau_s^y/(H\rho_0)$' self.eta_labels['vbstr'] = ur'$-\tau_b^y/(H\rho_0)$' def run_average(self, verbose=True): """ USAGE ----- m2_terms.run_average(verbose=True) Takes the time average of all terms over the entire run. Use CAUTION with very large records to avoid a MemoryError. """ if self._RUN_AVERAGED or self._TIME_AVERAGED: print "Terms have already been time-averaged." 
return else: print "Averaging %s records together."%self.nt for term in self.xi.iterkeys(): if verbose: print "Run-averaging %s term."%term self.xi[term] = self.xi[term][:].mean(axis=0) for term in self.eta.iterkeys(): if verbose: print "Run-averaging %s term."%term self.eta[term] = self.eta[term][:].mean(axis=0) self._RUN_AVERAGED = True def time_average(self, verbose=True, tstart=0., tend=10.): """ USAGE ----- m2_terms.time_average(self, verbose=True, tstart=0., tend=10.) Takes the time average of all terms over a specified time interval [tstart, tend]. Use CAUTION with very large records to avoid a MemoryError. """ if self._RUN_AVERAGED or self._TIME_AVERAGED: print "Terms have already been time-averaged." return else: Time = self.time Time-=Time[0] t1 = np.abs(Time-tstart).argmin() t2 = np.abs(Time-tend).argmin() time = Time[t1:t2] print "Averaging records between days %.2f and %.2f."%(self.time[t1], self.time[t2]) for term in self.xi.iterkeys(): if verbose: print "Time-averaging %s term."%term self.xi[term] = self.xi[term][t1:t2,:].mean(axis=0) for term in self.eta.iterkeys(): if verbose: print "Time-averaging %s term."%term self.eta[term] = self.eta[term][t1:t2,:].mean(axis=0) self._TIME_AVERAGED = True def interp2line(self, ipts): """ USAGE ----- m2_terms.interp2line(ipts) Interpolates the terms to a given line with coordinates 'ipts', where ipts is a tuple like (lons.ravel(),lats.ravel()). Use CAUTION with very large records to avoid a MemoryError. """ pts = (self.x.ravel(),self.y.ravel()) self.nxy = ipts[0].size # Updating array shapes. 
if self._RUN_AVERAGED or self._TIME_AVERAGED: for term in self.xi.iterkeys(): print "Interpolating %s term."%term self.xi[term] = spint.griddata(pts, self.xi[term].ravel(), ipts, method='linear') for term in self.eta.iterkeys(): print "Interpolating %s term."%term self.eta[term] = spint.griddata(pts, self.eta[term].ravel(), ipts, method='linear') else: skel = np.zeros((self.nt, self.nxy)) for term in self.xi.iterkeys(): xiterm_aux = skel.copy() print "" print "Interpolating %s term."%term for n in xrange(self.nt): print "Interpolating record %d/%d to line."%(n+1,self.nt) xiterm_aux[n,:] = spint.griddata(pts, self.xi[term][n,:].ravel(), ipts, method='linear') self.xi[term] = xiterm_aux for term in self.eta.iterkeys(): etaterm_aux = skel.copy() print "" print "Interpolating %s term."%term for n in xrange(self.nt): print "Interpolating record %d/%d to line."%(n+1,self.nt) etaterm_aux[n,:] = spint.griddata(pts, self.eta[term][n,:].ravel(), ipts, method='linear') self.eta[term] = etaterm_aux def rotate(self, ang_rot, degrees=False): """ USAGE ----- m2_terms.rotate(ang_rot, degrees=False) Rotates all terms from (eastward,northward) axes to arbitrary (x*,y*) axes, e.g., an isobath-following coordinate system. if 'ang_rot' is a scalar, all points are rotated by the same angle. If 'ang_rot' is an array the same shape as the terms, the rotation is done point-wise. """ if degrees: ang_rot = ang_rot*np.pi/180. # Degrees to radians. if self._RUN_AVERAGED or self._TIME_AVERAGED: print "Rotating all records to (x*,y*) coordinates." 
else: pass for termx,termy in zip(self.keys_xi,self.keys_eta): if self._RUN_AVERAGED or self._TIME_AVERAGED: try: termxi_tmp = self.xi[termx] print termx except KeyError: print "Warning: %s not available."%termx continue try: termeta_tmp = self.eta[termy] print termy except KeyError: print "Warning: %s not available."%termy continue self.xi[termx] = + termxi_tmp*np.cos(ang_rot) + termeta_tmp*np.sin(ang_rot) self.eta[termy] = - termxi_tmp*np.sin(ang_rot) + termeta_tmp*np.cos(ang_rot) else: for n in xrange(self.nt): print "Rotating record %d/%d to (x*,y*) coordinates."%(n+1,self.nt) try: termxi_tmp = self.xi[termx][n,:] print termx except KeyError: print "Warning: %s not available."%termx continue try: termeta_tmp = self.eta[termy][n,:] print termy except KeyError: print "Warning: %s not available."%termy continue self.xi[termx][n,:] = + termxi_tmp*np.cos(ang_rot) + termeta_tmp*np.sin(ang_rot) self.eta[termy][n,:] = - termxi_tmp*np.sin(ang_rot) + termeta_tmp*np.cos(ang_rot) def check_magnitudes(self, print_terms=False): """ USAGE ----- m2_terms.check_magnitudes() Displays statistics (min,mean,max) of the absolute values of each term. If momentum was conserved exactly, then all terms would sum up to zero. This is useful as a first verification of the order of magnitude to which depth-averaged momentum is conserved in the solution. For example, if the leading-order balance is geostrophic and has O(1e-5 m/s2) magnitudes, the next-order balance is frictional and has O(1e-6 m/s2) magnitudes and all the terms sum up to a residue of O(1e-7 m/s2), then an analysis of the leading-order terms (pressure gradient and Coriolis forces) should be consistent and allow for accurate conclusions. Users may want to look for specific grid points where the terms just don't add up. """ residuex = np.zeros(self.nxy) residuey = np.zeros(self.nxy) print "" print "Calculating magnitudes of the M2 balance terms." 
if self._RUN_AVERAGED or self._TIME_AVERAGED: for termx,termy in zip(self.keys_xi,self.keys_eta): try: Termx = self.xi[termx] except KeyError: print "Warning: %s not available."%termx continue try: Termy = self.eta[termy] except KeyError: print "Warning: %s not available."%termy continue ## Moving local acceleration terms to the same side of the equality as the other terms. if termx=='ut': Termx = -Termx else: pass if termy=='vt': Termy = -Termy else: pass if print_terms: print "" print "%s (min,mean,max) %.1e %.1e %.1e"%(termx,np.nanmin(np.abs(Termx)), np.nanmean(np.abs(Termx)), np.nanmax(np.abs(Termx))) print "%s (min,mean,max) %.1e %.1e %.1e"%(termy,np.nanmin(np.abs(Termy)), np.nanmean(np.abs(Termy)), np.nanmax(np.abs(Termy))) print "" residuex+=Termx residuey+=Termy print "" print "================================" print "RUN_AVERAGED M2 balance residues" print "================================" print "" print "XI (min,mean,max) %.1e %.1e %.1e"%(np.nanmin(np.abs(residuex)), np.nanmean(np.abs(residuex)), np.nanmax(np.abs(residuex))) print "ETA (min,mean,max) %.1e %.1e %.1e"%(np.nanmin(np.abs(residuey)), np.nanmean(np.abs(residuey)), np.nanmax(np.abs(residuey))) print "++++++++++++++++++++++++++++++++++++" else: for n in xrange(self.nt): for termx,termy in zip(self.keys_xi,self.keys_eta): try: Termx = self.xi[termx][n,:] except KeyError: print "Warning: %s not available."%termx continue try: Termy = self.eta[termy][n,:] except KeyError: print "Warning: %s not available."%termy continue ## Moving local acceleration terms to the same side of the equality as the other terms. 
if termx=='ut': Termx = -Termx else: pass if termy=='vt': Termy = -Termy else: pass if print_terms: print "" print "%s (min,mean,max) %.1e %.1e %.1e"%(termx,np.nanmin(np.abs(Termx)), np.nanmean(np.abs(Termx)), np.nanmax(np.abs(Termx))) print "%s (min,mean,max) %.1e %.1e %.1e"%(termy,np.nanmin(np.abs(Termy)), np.nanmean(np.abs(Termy)), np.nanmax(np.abs(Termy))) print "" residuex+=Termx residuey+=Termy print "" print "================================================" print "INSTANTANEOUS M2 balance residues (record %d/%d)"%(n+1,self.nt) print "================================================" print "" print "XI (min,mean,max) %.1e %.1e %.1e"%(np.nanmin(np.abs(residuex)), np.nanmean(np.abs(residuex)), np.nanmax(np.abs(residuex))) print "ETA (min,mean,max) %.1e %.1e %.1e"%(np.nanmin(np.abs(residuey)), np.nanmean(np.abs(residuey)), np.nanmax(np.abs(residuey))) print "++++++++++++++++++++++++++++++++++++" residuex = np.zeros(self.nxy) residuey = np.zeros(self.nxy) ### CLASS PlotROMS ##################################################### class PlotROMS(object): """ Visualization of ROMS outputs DEPENDS ON EXTERNAL FUNCTIONS AND CLASSES FROM ROMSLAB MODULE: get_depths !!! Under construction !!! """ # getting ROMS varibles as class attributes def __init__(self, outname): self.outname = outname self.outfile = nc.Dataset(outname, mode='r') varlist = ['lon_rho', 'lat_rho', 'lon_u', 'lat_u', 'lon_v', 'lat_v', 'h', 'angle', 'ocean_time', 'temp', 'salt', 'ubar', 'vbar', 'u', 'v', 'zeta'] namelist = ['lonr', 'latr', 'lonu', 'latu', 'lonv', 'latv', 'h', 'angle', 'time', 'temp', 'salt', 'ubar', 'vbar', 'u', 'v', 'zeta'] for name, var in zip(namelist, varlist): try: exec "self.%s = self.outfile.variables['%s']" %(name, var) except KeyError: print "WARNING: ROMS output NetCDF file must contain the \ variable '%s' !! \n None was assined to this attribute. 
Some methods may \ not work properly.\n" %var exec "self.%s = None" %name self.lonr = self.lonr[:]; self.lonu = self.lonu[:]; self.lonv = self.lonv[:]; self.latr = self.latr[:]; self.latu = self.latu[:]; self.latv = self.latv[:]; self.h = self.h[:] self.lm, self.km, self.im, self.jm = self.temp[:].shape dates = [] # dates are obtained by ROMS defaut origin (0001/01/01) for k in range(0,len(self.time[:])): sec = dt.timedelta(seconds=int(self.time[k] ) ) dates.append( dt.datetime(1, 1, 1) + sec ) self.dates = dates def corners(self): """ Returns lon, lat cornes for a map projection: Usage: llclon, urclon, llclat, urclat = corners(self) """ llclon = self.lonr.min() urclon = self.lonr.max() llclat = self.latr.min() urclat = self.latr.max() return llclon, urclon, llclat, urclat def inimap(self, lims=None): """ Initializes a Basemap object instance to use as map projection for future use inside this class INPUT: lims: Map lon, lat limits [tuple: (lonmin, lonmax, latmin, latmax)] If not provided, grid corners will be used as limits OUTPUT: Basemap object instance """ if not lims: lims = self.corners() m = Basemap(projection='merc', llcrnrlon=lims[0], urcrnrlon=lims[1], llcrnrlat=lims[2], urcrnrlat=lims[3], lat_ts=0, resolution='i') self.mlon, self.mlat = m(self.lonr, self.latr) return m def hslice(self, m, l=-1, nk=-1, velkey=1, trkey=1, vsc=10, vskip=1, tr='temp', trmin=None, trmax=None): """ Vel over temp or vel over salt at any Z level !!! Under construction !!! 
USAGE: hslice= (m, args) INPUT: m: Basemap instance [use the method inimap to create that] l: model time step [integer] nk: depth [integer, meters] (Default = -1: the shalowest level ) velkey: flag to plot [1] or not [0] velocity vectors trkey: flag to plot [1] or not [0] tracer field vsc: quiver scale [integer] vskip: vector skip for nice quiver presentation [integer] tr: kind of tracer [string: 'temp' or 'salt'] trmin: minimum value for tracer colormap trmax: maximum value for tracer colormap OUTPUT: Matplotlib figure on screen Figure properties and plotted arrays will become available as PlotROMS class attributes """ # preparing velocity vetors if velkey: zu = get_depths(self.outfile, l, 'u') zv = get_depths(self.outfile, l, 'v') u = 0*self.lonu; v = 0*self.lonv # initializing arrays if nk == -1: print "\n\nSurface level was chosen! %s\n\n" % ("."*50) u = self.u[l, nk,...] v = self.v[l, nk,...] else: print "\n\nInterpolating VEL from S to Z coordinates %s\n\n" % ("."*50) for a in range (0, self.im): for b in range(0, self.jm-1): u[a,b] = np.interp(-nk, zu[:, a, b], self.u[l, :, a, b] ) for a in range (0, self.im-1): for b in range(0, self.jm): v[a,b] = np.interp(-nk, zv[:, a, b], self.v[l, :, a, b] ) print "\n\nInterpolating (u,v) to rho-points %s\n\n" % ("."*50) u = griddata(self.lonu.ravel(), self.latu.ravel(), u.ravel(), self.lonr, self.latr) v = griddata(self.lonv.ravel(), self.latv.ravel(), v.ravel(), self.lonr, self.latr) # rotating vel according to grid angle u = u*np.cos(self.angle[:]) - v*np.sin(self.angle[:]) v = u*np.sin(self.angle[:]) + v*np.cos(self.angle[:]) # preparing tracer field if trkey: exec "tmp = self.%s" % (tr) zt = get_depths(self.outfile, l, 'temp') tracer = self.lonr*0 # initializing array if nk == -1: print "\n\nSurface level was chosen! %s\n\n" % ("."*50) tracer = tmp[l, nk,...] 
else: print "\n\nInterpolating TRACER from S to Z coordinates %s\n\n" % ("."*50) for a in range (0, self.im): for b in range(0, self.jm): tracer[a,b] = np.interp(-nk, zt[:, a, b], tmp[l, :, a, b] ) # masking out absurd values and in land values if velkey: u = np.ma.masked_where(u > 10, u) v = np.ma.masked_where(v > 10, v) u = np.ma.masked_where(self.h < nk, u) v = np.ma.masked_where(self.h < nk, v) if trkey: tracer = np.ma.masked_where(tracer > 40, tracer) tracer = np.ma.masked_where(self.h < nk, tracer) # initializing figure instance self.figure = plt.figure(facecolor='w') titlestr = '' if trkey: titlestr += "%s " %(tr) self.pcolor = m.pcolormesh(self.mlon, self.mlat, tracer, vmin=trmin, vmax=trmax) self.cbar = plt.colorbar() self.tracerAtZ = tracer if velkey: self.uAtZ = u self.vAtZ = v titlestr += "vel " self.quiver = m.quiver(self.mlon[::vskip,::vskip], self.mlat[::vskip,::vskip], u[::vskip,::vskip], v[::vskip,::vskip], scale=vsc) vmax = np.sqrt(u**2 + v**2).max() self.qkey = plt.quiverkey(self.quiver, 0.6, -0.05, vmax, r"$ %.0f cm s^{-1}$" %(vmax*100), labelpos='W', fontproperties={'weight': 'bold'}) self.contour = m.contour(self.mlon, self.mlat, self.h, (1000,200), colors='k') self.continents = m.fillcontinents() self.coast = m.drawcoastlines() titlestr += ": %04d/%02d/%02d %02d:%02dh" %(self.dates[l].year, self.dates[l].month, self.dates[l].day, self.dates[l].hour, self.dates[l].minute ) if nk == -1: titlestr += " : Surface" else: titlestr += " : %s m" %str(nk) self.figTitle = plt.title(titlestr, fontsize=10, fontweight='bold') plt.show() return def vslice(self, p1, p2, sc, zlim, field, cmap=plt.cm.jet, l=-1): """ Tracer vertical slice at any location !!! Under construction !!! 
USAGE: vslice = (p1, p2, sc, zlim, field, *args) INPUT: p1: starting (lon,lat) point of the transect [tuple] p2: ending (lon,lat) point of the transect [tuple] sc: scale for contour plot (min, max, step) [tuple, list or numpy array] zlim: vertical axis limits [tuple: (zmin, zmax)] field: field to be plotted [string: 'temp', 'salt', 'u', 'v'] cmap: colormap to be used l: model time step [integer] OUTPUT: Matplotlib figure on screen Figure properties and plotted arrays will become available as PlotROMS class attributes """ x = self.lonr y = self.latr t = self.temp[l,...] s = self.salt[l,...] u = self.u[l,...] v = self.v[l,...] ths = self.outfile.variables['theta_s'][:] thb = self.outfile.variables['theta_b'][:] hc = self.outfile.variables['hc'][:] z = zlevs(self.h[:], self.zeta[l,...], ths, thb, hc, self.km, 'r') res = ( np.gradient(self.lonr)[1].mean() + np.gradient(self.latr)[0].mean() ) / 2 siz = np.sqrt( (p1[0] - p2[0])**2 + (p1[1] - p2[1])**2 ) / res xs = np.linspace(p1[0], p2[0], siz) ys = np.linspace(p1[1], p2[1], siz) zs, ts, ss, us, vs = [], [], [], [], [] for k in range(0, xs.size): lin, col = near2d( x, y, xs[k], ys[k] ) zs.append( z[:, lin, col] ) ts.append( t[:, lin, col] ) ss.append( t[:, lin, col] ) us.append( u[:, lin, col] ) vs.append( v[:, lin, col] ) zs = np.array(zs) ts = np.array(ts) ss = np.array(ss) us = np.array(us) vs = np.array(vs) xs.shape = (xs.size, 1) ys.shape = (ys.size, 1) xs = xs.repeat(self.km, axis=1) ys = ys.repeat(self.km, axis=1) # computing cross and along transect velocity components exec "ps = p%s" %field[0] self.xVslice = xs self.yVslice = ys self.zVslice = zs self.tVslice = ts self.sVslice = ss self.uVslice = us self.vVslice = vs titlestr = "%s " %(tr) titlestr += ": %04d/%02d/%02d %02d:%02dh" %(self.dates[l].year, self.dates[l].month, self.dates[l].day, self.dates[l].hour, self.dates[l].minute ) self.figure = plt.figure(facecolor='w') ax1 = self.figure.add_axes([0.1, 0.1, 0.3, 0.3]) self.contourf = plt.contourf(xs, zs, ps, 
sc, axes=ax1, cmap=cmap) if xs[0,0] > xs[-1,-1]: ax1.set_xlim(ax1.get_xlim()[::-1]) if p1[0] == p2[0]: ax1.xaxis.set_ticklabels('') tit = "Longitude = %s" %str(p1[0]) else: tit = "Longitude" ax1.set_xlabel(tit) ax1.set_ylim(zlim) ax1.set_ylabel('z [m]') ax2 = self.figure.add_axes([0.5, 0.5, 0.3, 0.3]) con = plt.contourf(ys, zs, ps, sc, axes=ax2, cmap=cmap) if ys[0,0] > ys[-1,-1]: ax2.set_xlim(ax2.get_xlim()[::-1]) if p1[1] == p2[1]: ax2.xaxis.set_ticklabels('') tit = "Latitude = %s" %str(p1[1]) else: tit = "Latitude" ax2.set_yticklabels('') ax2.set_ylim(zlim) ax2.xaxis.set_ticks_position('top') ax1.set_position( [0.125, 0.1, 0.7, 0.75] ) ax2.set_position( [0.125, 0.1, 0.7, 0.75] ) self.figTitle = ax1.set_title(titlestr, fontsize=10, fontweight='bold') self.figTitle.set_position( (0.5, 1.12) ) ax3 = self.figure.add_axes([0.85, 0.1, 0.015, 0.75]) cbar = plt.colorbar(con, cax=ax3, orientation='vertical') cbar.set_label('$^\circ$ C') ax4 = self.figure.add_axes([0.4, 0.83, 0.2, 0.05]) ax4.set_title(tit, fontsize=10) ax4.set_axis_off() plt.show() return ### CLASS LoadEtopo5 ################################################### class LoadEtopo5(object): """Reads and stores ETOPO 5 data""" def __init__(self): import netCDF4 as nc import numpy as np self.ncfile = nc.Dataset('/home/rsoutelino/misc/etopo5.nc') lon = self.ncfile.variables['topo_lon'][:] lat = self.ncfile.variables['topo_lat'][:] self.lon, self.lat = np.meshgrid(lon, lat) self.h = self.ncfile.variables['topo'][:] ### FUNCTION near ################################################### def near(x,x0): """ Find the index where x has the closer value to x0 """ dx = x - x0 dx = abs(dx) fn = np.where( dx == dx.min() ) return fn ### FUNCTION near2d ################################################### def near2d(x, y, x0, y0): """ Find the indexes of the grid point that is nearest a chosen (x0, y0). 
Usage: line, col = near2d(x, y, x0, y0) """ dx = np.abs(x - x0); dx = dx / dx.max() dy = np.abs(y - y0); dy = dy / dy.max() dn = dx + dy fn = np.where(dn == dn.min()) line = int(fn[0]) col = int(fn[1]) return line, col ### FUNCTION subset ################################################### def subset(x, y, z, xmin, xmax, ymin, ymax): """ Returns a subset z array based on x and y limits Usage: x2, y2, z2 = subset(x, y, z, xmin, xmax, ymin, ymax) """ f = np.where((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax) ) x2 = x[ f[0][0]:f[0][-1], f[1][0]:f[1][-1] ] y2 = y[ f[0][0]:f[0][-1], f[1][0]:f[1][-1] ] z2 = z[ f[0][0]:f[0][-1], f[1][0]:f[1][-1] ] return x2, y2, z2 ### FUNCTION ZLEV ################################################### def zlev(h,theta_s,theta_b,Tcline,N,kgrid=0,zeta=0): """ Set S-Curves in domain [-1 < sc < 0] at vertical W- or RHO-points. On Input: h Bottom depth (m) of RHO-points (matrix). theta_s S-coordinate surface control parameter (scalar): [0 < theta_s < 20]. theta_b S-coordinate bottom control parameter (scalar): [0 < theta_b < 1]. Tcline Width (m) of surface or bottom boundary layer in which higher vertical resolution is required during streching (scalar). N Number of vertical levels (scalar). kgrid Depth grid type logical switch: kgrid = 0 -> depths of RHO-points. kgrid = 1 -> depths of W-points. On Output: z Depths (m) of RHO- or W-points (matrix). dz Mesh size (m) at W- or RHO-points (matrix). sc S-coordinate independent variable, [-1 < sc < 0] at vertical RHO-points (vector). Cs Set of S-curves used to stretch the vertical coordinate lines that follow the topography at vertical RHO-points (vector). 
Copyright (c) 2003 UCLA - Patrick Marchesiello Translated to python by Rafael Soutelino - rsoutelino@gmail.com Last Modification: Aug, 2010 """ Np = N + 1 ds = 1/N hmin = h.min() hc = min(hmin, Tcline) Mr, Lr = h.shape; if kgrid==0: zeta = np.zeros([Mr, Lr]) grid = 'r' elif zeta==0: zeta = np.zeros([Mr, Lr]) if grid == 'r': Nlev = N lev = np.arange(1, N+1, 1) sc = -1 + (lev-0.5) * ds else: Nlev = Np lev = np.arange(0, N+1, 1) sc = -1 + lev * ds Ptheta = np.sinh(theta_s * sc) / np.sinh(theta_s) Rtheta = np.tanh(theta_s * (sc + 0.5) ) / (2*np.tanh(0.5 * theta_s)) -0.5 Cs = (1-theta_b)*Ptheta + theta_b*Rtheta cff0 = 1 + sc cff1 = (sc-Cs) * hc cff2 = Cs z = np.zeros([N, Mr, Lr]) for k in np.arange(0, Nlev, 1): z[k,:,:] = cff0[k]*zeta + cff1[k] + cff2[k]*h dz = np.zeros([N, Mr, Lr]) for k in np.arange(1, Nlev, 1): dz[k-1,:,:] = z[k,:,:] - z[k-1,:,:] return z, dz ### FUNCTION ZTOSIGMA ################################################### def ztosigma(var,z,depth): """ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Copyright (c) 2003 UCLA - Pierrick Penven % % Translated to Python by Rafael Soutelino, rsoutelino@gmail.com % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % function vnew = ztosigma(var,z,depth) % % % % This function transform a variable from z to sigma coordinates % % % % On Input: % % % % var Variable z (3D matrix). % % z Sigma depths (m) of RHO- or W-points (3D matrix). % % depth z depth (vector; meters, negative). % % % % On Output: % % % % vnew Variable sigma (3D matrix). 
% % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Last Modification: Aug, 2010 """ Ns, Mp, Lp = z.shape Nz = depth.size vnew = np.zeros([Ns, Mp, Lp]) imat, jmat = np.meshgrid(np.arange(1, Lp+1, 1), np.arange(1, Mp+1, 1)) # Find the grid position of the nearest vertical levels for ks in np.arange(0, Ns, 1): sigmalev = np.squeeze(z[ks,:,:]) thezlevs = 0 * sigmalev thezlevs = np.array(thezlevs, dtype=int) for kz in np.arange(0, Nz, 1): f = np.where( sigmalev > depth[kz] ) thezlevs[f] = thezlevs[f] + 1 pos = Nz * Mp * (imat-1) + Nz * (jmat-1) + thezlevs pos = np.array(pos, dtype=int) z1 = depth[thezlevs - 1]; z1 = np.squeeze(z1) z2 = depth[thezlevs]; z2 = np.squeeze(z2) var = np.ravel(var.transpose()) pos = pos.ravel() v1 = var[pos-1]; v1.shape = (Mp,Lp) v2 = var[pos]; v2.shape = (Mp,Lp) vnew[ks,:,:] = (((v1-v2) * sigmalev + v2*z1 - v1*z2) / (z1-z2)) return vnew ### FUNCTION SIGMATOZ ################################################### # def sigmatoz() ### FUNCTION RHO2UVP ################################################### def rho2uvp(rfield): """ ################################################################ # # compute the values at u,v and psi points... # # Further Information: # http://www.brest.ird.fr/Roms_tools/ # # This file is part of ROMSTOOLS # # ROMSTOOLS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # ROMSTOOLS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA # # Copyright (c) 2001-2006 by Pierrick Penven # e-mail:Pierrick.Penven@ird.fr # # Translated to Python by Rafael Soutelino, rsoutelino@gmail.com # Last Modification: Aug, 2010 ################################################################ """ Mp, Lp = rfield.shape M = Mp - 1 L = Lp - 1 vfield = 0.5 * ( rfield[np.arange(0,M),:] + rfield[np.arange(1,Mp),:] ) ufield = 0.5 * ( rfield[:,np.arange(0,L)] + rfield[:,np.arange(1,Lp)] ) pfield = 0.5 * ( ufield[np.arange(0,M),:] + ufield[np.arange(1,Mp),:] ) return ufield, vfield, pfield ### FUNCTION SPHERIC_DIST ############################################### def spheric_dist(lat1,lat2,lon1,lon2): """ ##################################################################### # # function dist=spheric_dist(lat1,lat2,lon1,lon2) # # compute distances for a simple spheric earth # # input: # # lat1 : latitude of first point (matrix) # lon1 : longitude of first point (matrix) # lat2 : latitude of second point (matrix) # lon2 : longitude of second point (matrix) # # output: # dist : distance from first point to second point (matrix) # # Further Information: # http://www.brest.ird.fr/Roms_tools/ # # This file is part of ROMSTOOLS # # ROMSTOOLS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # ROMSTOOLS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA # # Copyright (c) 2001-2006 by Pierrick Penven # e-mail:Pierrick.Penven@ird.fr # # Translated to Python by Rafael Soutelino, rsoutelino@gmail.com # Last Modification: Aug, 2010 ################################################################ """ R = 6367442.76 # Determine proper longitudinal shift. l = np.abs(lon2-lon1) l[np.where(l >= 180)] = 360 - l[np.where(l >= 180)] # Convert Decimal degrees to radians. deg2rad = np.pi/180 lat1 = lat1*deg2rad lat2 = lat2*deg2rad l = l*deg2rad # Compute the distances dist = R * np.arcsin( np.sqrt( ( (np.sin(l) * np.cos(lat2) )**2 )\ + (((np.sin(lat2) * np.cos(lat1)) - (np.sin(lat1) * np.cos(lat2)\ * np.cos(l)))**2) ) ) return dist ### FUNCTION GET_METRICS ################################################### def get_metrics(latu, lonu, latv, lonv): """ #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # # Compute the pm and pn factors of a grid netcdf file # # Further Information: # http://www.brest.ird.fr/Roms_tools/ # # This file is part of ROMSTOOLS # # ROMSTOOLS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # ROMSTOOLS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA # # Copyright (c) 2001-2006 by Pierrick Penven # e-mail:Pierrick.Penven@ird.fr # # Translated to Python by Rafael Soutelino, rsoutelino@gmail.com # Last Modification: Aug, 2010 #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% """ Mp, L = latu.shape M, Lp = latv.shape Lm = L - 1 Mm = M - 1 dx = np.zeros([Mp, Lp]) dy = np.zeros([Mp, Lp]) dndx = np.zeros([Mp, Lp]) dmde = np.zeros([Mp, Lp]) lat1 = latu[:,np.arange(0,Lm)] lat2 = latu[:,np.arange(1,L)] lon1 = lonu[:,np.arange(0,Lm)] lon2 = lonu[:,np.arange(1,L)] dx[:,np.arange(1,L)] = spheric_dist(lat1, lat2, lon1, lon2) dx[:,0] = dx[:,1] dx[:,Lp-1] = dx[:,L-1] lat1 = latv[np.arange(0,Mm),:] lat2 = latv[np.arange(1,M),:] lon1 = lonv[np.arange(0,Mm),:] lon2 = lonv[np.arange(1,M),:] dy[np.arange(1,M),:] = spheric_dist(lat1, lat2, lon1, lon2) dy[0,:] = dy[1,:] dy[Mp-1,:] = dy[M-1,:] pm = 1/dx pn = 1/dy # dndx and dmde pn2 = pn[1:-2, 2:-1]; pn3 = pn[1:-2, 1:-2] dndx[1:-2, 1:-2] = 0.5 * (1/pn2 - 1/pn3) pm2 = pm[2:-1, 1:-2]; pm3 = pm[1:-2, 1:-2] dmde[1:-2, 1:-2] = 0.5 * (1/pm2 - 1/pm3) return pm, pn, dndx, dmde ### FUNCTION GET_ANGLE ################################################### def get_angle(latu,lonu,argu1='wgs84'): """ ################################################################ # # Compute the grid orientation: angle [radians] # between XI-axis and the direction to the EAST # at RHO-points. 
# # lonu longitude of u points # latu latitude of u points # argu1: spheroid # 'clarke66' Clarke 1866 # 'iau73' IAU 1973 # 'wgs84' WGS 1984 (default) # 'sphere' Sphere of radius 6371.0 km # # copied from dist.m of the Oceans toolbox # # Translated to Python by Rafael Soutelino, rsoutelino@gmail.com # Last Modification: Aug, 2010 # ################################################################ """ spheroid = argu1 if spheroid[0:3] == 'sph': A = 6371000.0 B = A E = np.sqrt( A*A - B*B ) / A EPS = E*E / ( 1-E*E ) elif spheroid[0:3] == 'cla': A = 6378206.4 B = 6356583.8 E = np.sqrt( A*A - B*B ) / A EPS = E*E / ( 1. - E*E ) elif spheroid[0:3] == 'iau': A = 6378160.0 B = 6356774.516 E = np.sqrt( A*A - B*B ) / A EPS = E*E / ( 1. - E*E ) elif spheroid[0:3] == 'wgs': A = 6378137.0 E = 0.081819191 B = np.sqrt( A**2 - (A*E)**2) EPS = E*E / ( 1. - E*E ) else: print "Unknown spheroid was specified" latu = latu*np.pi / 180 # convert to radians lonu = lonu*np.pi / 180 latu[np.where(latu == 0)] = 2.2204e-16 # Fixes some nasty 0/0 cases M, L = latu.shape PHI1 = latu[0:, 0:-1] # endpoints of each segment XLAM1 = lonu[0:, 0:-1] PHI2 = latu[0:, 1:] XLAM2 = lonu[0:, 1:] # wiggle lines of constant lat to prevent numerical probs. f = np.where(PHI1 == PHI2) PHI2[f] = PHI2[f] + 1e-14 # wiggle lines of constant lon to prevent numerical probs. f = np.where(XLAM1 == XLAM2) XLAM2[f] = XLAM2[f] + 1e-14 # COMPUTE THE RADIUS OF CURVATURE IN THE PRIME VERTICAL FOR # EACH POINT xnu1 = A / np.sqrt( 1.0 - ( E * np.sin(PHI1) )**2 ) xnu2 = A / np.sqrt( 1.0 - ( E * np.sin(PHI2) )**2 ) # COMPUTE THE AZIMUTHS. azim IS THE AZIMUTH AT POINT 1 # OF THE NORMAL SECTION CONTAINING THE POINT 2 TPSI2 = ( 1.0 - E*E ) * np.tan(PHI2) + \ E * E * xnu1 * np.sin(PHI1) / (xnu2 * np.cos(PHI2) ) # SOME FORM OF ANGLE DIFFERENCE COMPUTED HERE?? 
DLAM = XLAM2 - XLAM1 CTA12 = ( np.cos(PHI1) * TPSI2 - \ np.sin(PHI1) * np.cos(DLAM) ) / np.sin(DLAM) azim = np.arctan( 1.0 / CTA12 ) # GET THE QUADRANT RIGHT DLAM2 = ( np.abs(DLAM) < np.pi ) * DLAM + ( DLAM >= np.pi ) * \ ( (-2)*np.pi + DLAM ) + ( DLAM <= -np.pi ) * \ ( 2*np.pi + DLAM ) azim = azim + ( azim < -np.pi ) * 2*np.pi - \ ( azim >= np.pi ) * 2*np.pi azim = azim + np.pi * np.sign(-azim) * \ ( np.sign(azim) != np.sign(DLAM2) ) ang = np.zeros([M, L+1]) ang[:,1:-1] = (np.pi/2) - azim ang[:,0] = ang[:,1] ang[:,-1] = ang[:,-2] return ang ### FUNCTION ADD_TOPO ################################################### def add_topo(lon, lat, pm, pn, toponame): """ ################################################################ # # add a topography (here etopo2) to a ROMS grid # # the topogaphy matrix is coarsened prior # to the interpolation on the ROMS grid tp # prevent the generation of noise due to # subsampling. this procedure ensure a better # general volume conservation. # # Last update Pierrick Penven 8/2006. # # # Further Information: # http://www.brest.ird.fr/Roms_tools/ # # This file is part of ROMSTOOLS # # ROMSTOOLS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # ROMSTOOLS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA # # Copyright (c) 2001-2006 by Pierrick Penven # e-mail:Pierrick.Penven@ird.fr # # Updated Aug-2006 by Pierrick Penven # Updated 2006/10/05 by Pierrick Penven (dl depend of model # resolution at low resolution) # Translated to Python by Rafael Soutelino, rsoutelino@gmail.com # Last Modification: Aug, 2010 ################################################################ """ print ' Reading topog data' dat = nc.Dataset(toponame) x = dat.variables['lon'][0,:] y = dat.variables['lat'][:,0] z = dat.variables['topo'][:] dxt = np.mean(np.abs(np.diff(np.ravel(x)))) dyt = np.mean(np.abs(np.diff(np.ravel(y)))) dxt = np.mean([dxt, dyt]) x, y = np.meshgrid(x, y) print ' Slicing topog data into ROMS domain' x, y, z = subset(x, y, z, lon.min()-1, lon.max()+1, lat.min()-1, lat.max()+1) # # slicing topog into roms domain # xm = np.ma.masked_where(x <= lon.min()-1, x) # ym = np.ma.masked_where(x <= lon.min()-1, y) # zm = np.ma.masked_where(x <= lon.min()-1, z) # x = np.ma.compress_cols(xm) # y = np.ma.compress_cols(ym) # z = np.ma.compress_cols(zm) # del xm, ym, zm # xm = np.ma.masked_where(x >= lon.max()+1, x) # ym = np.ma.masked_where(x >= lon.max()+1, y) # zm = np.ma.masked_where(x >= lon.max()+1, z) # x = np.ma.compress_cols(xm) # y = np.ma.compress_cols(ym) # z = np.ma.compress_cols(zm) # del xm, ym, zm # xm = np.ma.masked_where(y <= lat.min()-1, x) # ym = np.ma.masked_where(y <= lat.min()-1, y) # zm = np.ma.masked_where(y <= lat.min()-1, z) # x = np.ma.compress_rows(xm) # y = np.ma.compress_rows(ym) # z = np.ma.compress_rows(zm) # del xm, ym, zm # xm = np.ma.masked_where(y >= lat.max()+1, x) # ym = np.ma.masked_where(y >= lat.max()+1, y) # zm = np.ma.masked_where(y >= lat.max()+1, z) # x = np.ma.compress_rows(xm) # y = np.ma.compress_rows(ym) # z = np.ma.compress_rows(zm) 
# del xm, ym, zm dxr = np.mean( 1/pm ) dyr = np.mean( 1/pn ) dxr = np.mean([dxr, dyr]) dxr = np.floor(dxr/1852); dxr = dxr/60 # degrading original topog resolution according to roms # grid resolution to avoid unecessary heavy computations d = int(np.floor( dxr/dxt )) if d == 0: d = 1 x = x[0::d, 0::d] y = y[0::d, 0::d] z = z[0::d, 0::d] h = -z print ' Interp topog data into ROMS grid' h = griddata(x.ravel(),y.ravel(),h.ravel(),lon,lat,interp='nn') return h ### FUNCTION PROCESS_MASK ############################################ def process_mask(maskin): """ ################################################################ # # maskout=process_mask(maskin) # # Process the mask at rho-points in order to remove isolated # masked points, cape with only 1 mask... # Ensure continuous mask close to the boundaries # # Further Information: # http://www.brest.ird.fr/Roms_tools/ # # This file is part of ROMSTOOLS # # ROMSTOOLS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # ROMSTOOLS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
def uvp_mask(rfield):
    """
    Derive the masks at U-, V- and PSI-points from the RHO-point mask.

    rfield : 2D land/sea mask at RHO-points (1 = sea, 0 = land).

    Returns (ufield, vfield, pfield): a staggered point is sea only when
    every adjacent RHO-point is sea, which the products below encode.

    Part of ROMSTOOLS (GPL v2+), (c) 2001-2006 Pierrick Penven;
    translated to Python by Rafael Soutelino (Aug 2010).
    """
    # Product of the two neighbouring rho values: 1 only if both are 1.
    vfield = rfield[:-1, :] * rfield[1:, :]
    ufield = rfield[:, :-1] * rfield[:, 1:]
    # PSI mask from the U mask, collapsing the remaining direction.
    pfield = ufield[:-1, :] * ufield[1:, :]
    return ufield, vfield, pfield
def zlevs(h, zeta, theta_s, theta_b, hc, N, type):
    """
    Compute the depths of the ROMS sigma levels (Song & Haidvogel 1994
    stretching).

    h       : 2D array, bottom depth (m, positive down) at RHO-points
    zeta    : 2D array, free-surface elevation (m)
    theta_s : surface stretching parameter
    theta_b : bottom stretching parameter
    hc      : critical depth (m)
    N       : number of vertical RHO levels
    type    : 'r' for RHO-point depths, 'w' for W-point depths

    Returns z, a (N, M, L) array of depths (negative down); for type
    'w' there are N+1 levels.

    Part of ROMSTOOLS (GPL v2+), (c) 2002-2006 Pierrick Penven;
    translated to Python by Rafael Soutelino (Aug 2010).
    """
    M, L = h.shape
    cff1 = 1.0 / np.sinh(theta_s)
    cff2 = 0.5 / np.tanh(0.5 * theta_s)
    if type == 'w':
        # BUGFIX: dtype=float forces true division.  The original
        # all-integer expression (np.arange(0, N+1) - N) / N floor-divides
        # under Python 2 / integer numpy arrays, collapsing the sigma
        # coordinate to -1 or 0.  (The 'r' branch was already float
        # because of the literal 0.5.)
        sc = (np.arange(0, N + 1, dtype=float) - N) / N
        N = N + 1
    else:
        sc = (np.arange(1, N + 1, dtype=float) - N - 0.5) / N
    # Stretching curve C(s), Song & Haidvogel (1994) eq. for Cs.
    Cs = (1 - theta_b) * cff1 * np.sinh(theta_s * sc) + \
         theta_b * (cff2 * np.tanh(theta_s * (sc + 0.5)) - 0.5)
    hinv = 1.0 / h
    cff = hc * (sc - Cs)
    cff1 = Cs
    cff2 = sc + 1
    z = np.zeros([N, M, L])
    for k in range(0, N):
        # z0 is the depth at rest; the zeta term stretches the column
        # with the moving free surface.
        z0 = cff[k] + cff1[k] * h
        z[k, :, :] = z0 + zeta * (1 + z0 * hinv)
    return z
# # Further Information: # http://www.brest.ird.fr/Roms_tools/ # # This file is part of ROMSTOOLS # # ROMSTOOLS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # ROMSTOOLS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA # # Copyright (c) 2002-2006 by Pierrick Penven # e-mail:Pierrick.Penven@ird.fr # Translated to Python by Rafael Soutelino, rsoutelino@gmail.com # Last Modification: Aug, 2010 ################################################################ """ M, L = h.shape cff1 = 1.0 / np.sinh( theta_s ) cff2 = 0.5 / np.tanh( 0.5*theta_s ) if type=='w': sc = ( np.arange(0,N+1) - N ) / N N = N + 1 else: sc = ( np.arange(1,N+1) - N - 0.5 ) / N Cs = (1 - theta_b) * cff1 * np.sinh( theta_s * sc ) + \ theta_b * ( cff2 * np.tanh(theta_s *(sc + 0.5) ) - 0.5 ) hinv = 1 / h cff = hc*( sc - Cs ) cff1 = Cs cff2 = sc + 1 z = np.zeros([N, M, L]) for k in range(0, N): z0 = cff[k] + cff1[k]*h z[k, :, :] = z0 + zeta*( 1 + z0*hinv ) return z ### FUNCTION GET_DEPTHS ############################################## def get_depths(fname,tindex,type): """ ###################################################################### # # Get the depths of the sigma levels # # Further Information: # http://www.brest.ird.fr/Roms_tools/ # # This file is part of ROMSTOOLS # # ROMSTOOLS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software 
Foundation; either version 2 of the License, # or (at your option) any later version. # # ROMSTOOLS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA # # Copyright (c) 2002-2006 by Pierrick Penven # e-mail:Pierrick.Penven@ird.fr # # Translated to Python by Rafael Soutelino, rsoutelino@gmail.com # Last Modification: Aug, 2010 ###################################################################### """ def rho2u_3d(var_rho): N , Mp, Lp = var_rho.shape L = Lp - 1 var_u = 0.5*( var_rho[:,:,0:L] + var_rho[:,:,1:Lp] ) return var_u def rho2v_3d(var_rho): N, Mp, Lp = var_rho.shape M = Mp - 1 var_v = 0.5*( var_rho[:,0:M,:] + var_rho[:,1:Mp,:]) return var_v try: h = fname.variables['h'][:] except KeyError: h = fname.variables['dep'][:] zeta = np.squeeze( fname.variables['zeta'][tindex,...] ) zeta[ np.where(zeta > 1e36) ] = 0 theta_s = fname.variables['theta_s'][:] theta_b = fname.variables['theta_b'][:] Tcline = fname.variables['Tcline'][:] hc = fname.variables['hc'][:] s_rho = fname.variables['s_rho'][:] hmin = h.min() N = s_rho.size vtype = type if (type=='u') | (type=='v'): vtype='r' z = zlevs(h,zeta,theta_s,theta_b,hc,N,vtype) if type=='u': z = rho2u_3d(z) if type=='v': z = rho2v_3d(z) return z ### FUNCTION SMOOTHGRID ############################################## def smoothgrid(h,maskr,hmin,hmax_coast,rmax,n_filter_deep_topo,n_filter_final): """ # # Smooth the topography to get a maximum r factor = rmax # # n_filter_deep_topo: # Number of pass of a selective filter to reduce the isolated # seamounts on the deep ocean. 
# # n_filter_final: # Number of pass of a single hanning filter at the end of the # procedure to ensure that there is no 2DX noise in the # topography. # # Further Information: # http://www.brest.ird.fr/Roms_tools/ # # This file is part of ROMSTOOLS # # ROMSTOOLS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # ROMSTOOLS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA # # Copyright (c) 2005-2006 by Pierrick Penven # e-mail:Pierrick.Penven@ird.fr # # Contributions of A. Shchepetkin (UCLA), P. Marchesiello (IRD) # and X. Capet (UCLA) # # Updated Aug-2006 by Pierrick Penven # # Translated to Python by Rafael Soutelino, rsoutelino@gmail.com # Last Modification: Aug, 2010 ################################################################ """ # Cut the topography h[ np.where(h < hmin) ] = hmin # 1: Deep Ocean Filter if n_filter_deep_topo >= 1: print ' Removing isolated seamounts in the deep ocean' print ' ==> '+ str(n_filter_deep_topo) +' pass of selective filter' # Build a smoothing coefficient that is a linear function # of a smooth topography. 
coef = h.copy() for i in range(1, 8): coef = hanning_smoother(coef) # coef is a smoothed bathy coef = 0.125 * (coef / coef.max() ) # rescale the smoothed bathy for i in range(1, int(n_filter_deep_topo+1) ): h = hanning_smoother_coef2d(h, coef) # smooth with a variable coef f1 = 0*maskr; f2 = 0*h f1[ np.where(maskr == 0)] = 1 f2[ np.where(h > hmax_coast) ] = 1 f = f1 == f2 h[f] = hmax_coast print ' Applying filter on log(h) to reduce grad(h)/h' h = rotfilter(h, maskr, hmax_coast, rmax) # Smooth the topography again to prevent 2D noise if n_filter_final > 1: print ' Smooth the topography a last time to prevent 2DX noise' print ' ==> '+ str(n_filter_final) +' pass of hanning smoother' for i in range( 1, int(n_filter_final) ): h = hanning_smoother(h) f1 = 0*maskr; f2 = 0*h f1[ np.where(maskr == 0)] = 1 f2[ np.where(h > hmax_coast) ] = 1 f = f1 == f2 h[f] = hmax_coast h[ np.where(h < hmin) ] = hmin return h ###################################################### def rotfilter(h, maskr, hmax_coast, rmax): """ # # Apply a selective filter on log(h) to reduce grad(h)/h. 
# """ M, L = h.shape Mm = M - 1 Mmm = M - 2 Lm = L - 1 Lmm = L - 2 cff = 0.8 nu = 3.0/16.0 rx, ry = rfact(h) r = max(rx.max(), ry.max()) h = np.log(h) hmax_coast = np.log(hmax_coast) i = 0 while r > rmax: i = i + 1 cx = 0*rx; cy = 0*ry cx[ np.where(rx > cff*rmax) ] = 1 cx = hanning_smoother(cx) cy[ np.where(ry > cff*rmax) ] = 1 cy = hanning_smoother(cy) fx = cx * FX(h) fy = cy * FY(h) h[1:Mm, 1:Lm] = h[1:Mm, 1:Lm] + nu* \ ( (fx[1:Mm, 1:Lm] - fx[1:Mm, 0:Lmm] ) + \ (fy[1:Mm, 1:Lm] - fy[0:Mmm, 1:Lm] ) ) h[0, :] = h[1, :] h[M-1, :] = h[Mm-1, :] h[:, 0] = h[:, 1] h[:, L-1] = h[:, Lm-1] f1 = 0*maskr; f2 = 0*h f1[ np.where(maskr == 0)] = 1 f2[ np.where(h > hmax_coast) ] = 1 f = f1 == f2 h[f] = hmax_coast rx, ry = rfact( np.exp(h) ) r = max(rx.max(), ry.max()) print 'r factor = ' + str(r) h = np.exp(h) return h ########################################################## def rfact(h): M, L = h.shape Mm = M - 1 Mmm = M - 2 Lm = L - 1 Lmm = L - 2 rx = np.abs( h[0:M, 1:L] - h[0:M, 0:Lm] ) / ( h[0:M, 1:L] + h[0:M, 0:Lm] ) ry = np.abs( h[1:M, 0:L] - h[0:Mm, 0:L] ) / ( h[1:M, 0:L] + h[0:Mm, 0:L] ) return rx, ry ########################################################## def hanning_smoother(h): M, L = h.shape Mm = M - 1 Mmm = M - 2 Lm = L - 1 Lmm = L - 2 h[1:Mm, 1:Lm] = 0.125 * ( h[0:Mmm, 1:Lm] + h[2:M, 1:Lm] + \ h[1:Mm, 0:Lmm] + h[1:Mm, 2:L] + \ 4 * h[1:Mm, 1:Lm] ) h[0, :] = h[1, :] h[M-1, :] = h[Mm-1, :] h[:, 0] = h[:, 1] h[:, L-1] = h[:, Lm-1] return h ########################################################## def hanning_smoother_coef2d(h, coef): M, L = h.shape Mm = M - 1 Mmm = M - 2 Lm = L - 1 Lmm = L - 2 h[1:Mm, 1:Lm] = coef[1:Mm, 1:Lm] * ( h[0:Mmm, 1:Lm] + h[2:M, 1:Lm] + \ h[1:Mm, 0:Lmm] + h[1:Mm,2:L]) + \ (1 - 4 * coef[1:Mm, 1:Lm]) * h[1:Mm, 1:Lm] h[0,:] = h[1, :] h[M-1,:] = h[Mm-1, :] h[:,0] = h[:, 1] h[:,L-1] = h[:, Lm-1] return h ########################################################## def FX(h): M, L = h.shape Mm = M - 1 Mmm = M - 2 Lm = L - 1 Lmm = L - 2 fx = 
def FY(h):
    """Weighted eta-direction difference of h, staggered at v-points;
    first/last columns copy their interior neighbour."""
    M, L = h.shape
    fy = np.zeros([M - 1, L])
    fy[:, 1:-1] = ((h[1:, 1:-1] - h[:-1, 1:-1]) * 5 / 6 +
                   (h[1:, :-2] - h[:-1, :-2] +
                    h[1:, 2:] - h[:-1, 2:]) / 12)
    fy[:, 0] = fy[:, 1]
    fy[:, -1] = fy[:, -2]
    return fy


def wind_stress(u, v):
    """
    taux, tauy = wind_stress(u, v)

    Wind stress from 10 m winds using the Large and Pond bulk formula.

    u, v : east-west / north-south wind components (m/s)

    Returns (taux, tauy), the stress components (Pa).
    """
    RHO_AIR = 1.22
    speed = np.sqrt(u * u + v * v)
    # Drag coefficient; the tiny offset keeps 2.7/speed finite in calm air.
    drag = (0.142 + 0.0764 * speed + 2.7 / (speed + 0.000001)) * 0.001
    taux = RHO_AIR * drag * speed * u
    tauy = RHO_AIR * drag * speed * v
    return taux, tauy


def brunt_vaissala(rho, depth):
    """
    N2 = brunt_vaissala(rho, depth)

    Brunt-Vaisala frequency squared, N2 = (g / rho0) * d(rho)/dz, from a
    1D density profile and matching depth array.
    """
    G = 9.8
    RHO0 = 1024
    return (G / RHO0) * (np.gradient(rho) / np.gradient(depth))


def burger(N2, H, f, R):
    """
    Bu = burger(N2, H, f, R)

    Burger number: ratio of the baroclinic deformation radius to the
    curvature radius of a promontory, Bu = N2 H^2 / (f^2 R^2).

    N2 : Brunt-Vaisala frequency squared of the jet's mean rho profile
    H  : undisturbed water depth
    f  : Coriolis parameter
    R  : radius of curvature of the promontory
    """
    return (N2 * H ** 2) / (f ** 2 * R ** 2)
def rx1(z_w, rmask):
    """
    rx1 = rx1(z_w, rmask)

    Compute the Haney stiffness ratio of the vertical grid.

    z_w   : (N, Lp, Mp) depths of the W-levels
    rmask : land/sea mask at RHO-points

    Returns rx1, the 2D field of Haney numbers, and prints its
    min/max/mean/median.
    """
    N, Lp, Mp = z_w.shape
    L=Lp-1
    M=Mp-1

    # Land/Sea mask on U-points.
    umask = np.zeros((L,Mp))
    for j in range(Mp):
        for i in range(1,Lp):
            umask[i-1,j] = rmask[i,j] * rmask[i-1,j]

    # Land/Sea mask on V-points.
    vmask = np.zeros((Lp,M))
    for j in range(1,Mp):
        for i in range(Lp):
            vmask[i,j-1] = rmask[i,j] * rmask[i,j-1]

    #-------------------------------------------------------------------
    # Compute R-factor.
    #-------------------------------------------------------------------
    zx = np.zeros((N,L,Mp))
    zy = np.zeros((N,Lp,M))
    # NOTE(review): at k == 0 the z_w[k-1, ...] terms wrap around to the
    # LAST vertical level via Python negative indexing; the MATLAB
    # original pairs level k with level k-1 starting from k = 2, so this
    # looks like a translation bug (the loop probably should start at 1).
    # Flagged, not changed -- confirm against the MATLAB rx1.m.
    for k in range(N):
        zx[k,:] = abs((z_w[k,1:,:] - z_w[k,:-1,:] + z_w[k-1,1:,:] - z_w[k-1,:-1,:]) /
                      (z_w[k,1:,:] + z_w[k,:-1,:] - z_w[k-1,1:,:] - z_w[k-1,:-1,:]))
        zy[k,:] = abs((z_w[k,:,1:] - z_w[k,:,:-1] + z_w[k-1,:,1:] - z_w[k-1,:,:-1]) /
                      (z_w[k,:,1:] + z_w[k,:,:-1] - z_w[k-1,:,1:] - z_w[k-1,:,:-1]))
        # Zero the ratio over land.
        zx[k,:] = zx[k,:] * umask
        zy[k,:] = zy[k,:] * vmask

    # Worst (largest) ratio of the four staggered estimates, then the
    # vertical maximum.
    r = np.maximum(np.maximum(zx[:,:,:-1],zx[:,:,1:]),
                   np.maximum(zy[:,:-1,:],zy[:,1:,:]))
    rx1 = np.amax(r, axis=0)

    rmin = rx1.min()
    rmax = rx1.max()
    ravg = rx1.mean()
    rmed = np.median(rx1)

    print ' '
    print 'Minimum r-value = ', rmin
    print 'Maximum r-value = ', rmax
    print 'Mean    r-value = ', ravg
    print 'Median  r-value = ', rmed
    return rx1
def stretching(sc, Vstretching, theta_s, theta_b):
    """
    Compute the ROMS vertical stretching curve C(s).

    sc          : ndarray of normalized sigma coordinates in [-1, 0]
    Vstretching : 1 (Song & Haidvogel 1994) or 4 (Shchepetkin 2010)
    theta_s     : surface stretching parameter
    theta_b     : bottom stretching parameter

    Returns the stretching curve evaluated at sc.

    Raises ValueError for an unsupported Vstretching value (the original
    silently fell through and returned None).
    """
    if Vstretching == 1:
        # Song and Haidvogel, 1994
        cff1 = 1. / np.sinh(theta_s)
        cff2 = 0.5 / np.tanh(0.5 * theta_s)
        return (1. - theta_b) * cff1 * np.sinh(theta_s * sc) + \
               theta_b * (cff2 * np.tanh(theta_s * (sc + 0.5)) - 0.5)

    if Vstretching == 4:
        # A. Shchepetkin (UCLA-ROMS, 2010) double vertical stretching.
        if theta_s > 0:
            Csur = (1.0 - np.cosh(theta_s * sc)) / (np.cosh(theta_s) - 1.0)
        else:
            Csur = -sc ** 2
        if theta_b > 0:
            return (np.exp(theta_b * Csur) - 1.0) / (1.0 - np.exp(-theta_b))
        return Csur

    raise ValueError("unsupported Vstretching: %r" % (Vstretching,))


def get_zlev(h, sigma, hc, sc, ssh=0., Vtransform=2):
    """
    Depths of the sigma levels for ROMS Vtransform 1 (1999 formulation)
    or 2 (2005 formulation).

    h          : 1D or 2D bottom depth (m, positive down)
    sigma      : stretching curve C(s) at the target levels
    hc         : critical depth (m)
    sc         : normalized sigma coordinates
    ssh        : free-surface elevation (default 0)
    Vtransform : 1 or 2 (default 2)

    Returns the level depths with a leading vertical axis.

    Raises ValueError for an unsupported Vtransform value (the original
    silently returned None).
    """
    if Vtransform == 1:
        # ROMS 1999
        hinv = 1. / h
        cff = hc * (sc - sigma)
        if len(h.shape) > 1:
            z0 = cff[:, None, None] + sigma[:, None, None] * h[None, :, :]
        else:
            z0 = cff[:, None] + sigma[:, None] * h[None, :]
        return z0 + ssh * (1. + z0 * hinv)
    elif Vtransform == 2:
        # ROMS 2005
        if len(h.shape) > 1:
            z0 = (hc * sc[:, None, None] + sigma[:, None, None] * h[None, :, :]) / \
                 (h[None, :, :] + hc)
        else:
            z0 = (hc * sc[:, None] + sigma[:, None] * h[None, :]) / (h[None, :] + hc)
        return ssh + (ssh + h) * z0
    raise ValueError("unsupported Vtransform: %r" % (Vtransform,))
rsoutelino/romslab
romslab.py
Python
mit
76,507
[ "NetCDF" ]
a3cf0f378c8bc18ab5c9f66e53122cd0b6b6919739578e90e127d8c5515a1ca1
# Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ %prog [options] module_or_package Check that a module satisfies a coding standard (and more !). %prog --help Display this help message and exit. %prog --help-msg <msg-id>[,<msg-id>] Display help messages about given message identifiers and exit. 
""" from __future__ import print_function import collections import contextlib import itertools import operator import os try: import multiprocessing except ImportError: multiprocessing = None import sys import tokenize import warnings import astroid from astroid.__pkginfo__ import version as astroid_version from astroid import modutils from logilab.common import configuration from logilab.common import optik_ext from logilab.common import interface from logilab.common import textutils from logilab.common import ureports from logilab.common.__pkginfo__ import version as common_version import six from pylint import checkers from pylint import interfaces from pylint import reporters from pylint import utils from pylint import config from pylint.__pkginfo__ import version MANAGER = astroid.MANAGER def _get_new_args(message): location = ( message.abspath, message.path, message.module, message.obj, message.line, message.column, ) return ( message.msg_id, message.symbol, location, message.msg, message.confidence, ) def _get_python_path(filepath): dirname = os.path.realpath(os.path.expanduser(filepath)) if not os.path.isdir(dirname): dirname = os.path.dirname(dirname) while True: if not os.path.exists(os.path.join(dirname, "__init__.py")): return dirname old_dirname = dirname dirname = os.path.dirname(dirname) if old_dirname == dirname: return os.getcwd() def _merge_stats(stats): merged = {} for stat in stats: for key, item in six.iteritems(stat): if key not in merged: merged[key] = item else: if isinstance(item, dict): merged[key].update(item) else: merged[key] = merged[key] + item return merged @contextlib.contextmanager def _patch_sysmodules(): # Context manager that permits running pylint, on Windows, with -m switch # and with --jobs, as in 'python -2 -m pylint .. --jobs'. # For more details why this is needed, # see Python issue http://bugs.python.org/issue10845. 
mock_main = __name__ != '__main__' # -m switch if mock_main: sys.modules['__main__'] = sys.modules[__name__] try: yield finally: if mock_main: sys.modules.pop('__main__') # Python Linter class ######################################################### MSGS = { 'F0001': ('%s', 'fatal', 'Used when an error occurred preventing the analysis of a \ module (unable to find it for instance).'), 'F0002': ('%s: %s', 'astroid-error', 'Used when an unexpected error occurred while building the ' 'Astroid representation. This is usually accompanied by a ' 'traceback. Please report such errors !'), 'F0003': ('ignored builtin module %s', 'ignored-builtin-module', 'Used to indicate that the user asked to analyze a builtin ' 'module which has been skipped.'), 'F0010': ('error while code parsing: %s', 'parse-error', 'Used when an exception occured while building the Astroid ' 'representation which could be handled by astroid.'), 'I0001': ('Unable to run raw checkers on built-in module %s', 'raw-checker-failed', 'Used to inform that a built-in module has not been checked ' 'using the raw checkers.'), 'I0010': ('Unable to consider inline option %r', 'bad-inline-option', 'Used when an inline option is either badly formatted or can\'t ' 'be used inside modules.'), 'I0011': ('Locally disabling %s (%s)', 'locally-disabled', 'Used when an inline option disables a message or a messages ' 'category.'), 'I0012': ('Locally enabling %s (%s)', 'locally-enabled', 'Used when an inline option enables a message or a messages ' 'category.'), 'I0013': ('Ignoring entire file', 'file-ignored', 'Used to inform that the file will not be checked'), 'I0020': ('Suppressed %s (from line %d)', 'suppressed-message', 'A message was triggered on a line, but suppressed explicitly ' 'by a disable= comment in the file. 
This message is not ' 'generated for messages that are ignored due to configuration ' 'settings.'), 'I0021': ('Useless suppression of %s', 'useless-suppression', 'Reported when a message is explicitly disabled for a line or ' 'a block of code, but never triggered.'), 'I0022': ('Pragma "%s" is deprecated, use "%s" instead', 'deprecated-pragma', 'Some inline pylint options have been renamed or reworked, ' 'only the most recent form should be used. ' 'NOTE:skip-all is only available with pylint >= 0.26', {'old_names': [('I0014', 'deprecated-disable-all')]}), 'E0001': ('%s', 'syntax-error', 'Used when a syntax error is raised for a module.'), 'E0011': ('Unrecognized file option %r', 'unrecognized-inline-option', 'Used when an unknown inline option is encountered.'), 'E0012': ('Bad option value %r', 'bad-option-value', 'Used when a bad value for an inline option is encountered.'), } def _deprecated_option(shortname, opt_type): def _warn_deprecated(option, optname, *args): # pylint: disable=unused-argument sys.stderr.write('Warning: option %s is deprecated and ignored.\n' % (optname,)) return {'short': shortname, 'help': 'DEPRECATED', 'hide': True, 'type': opt_type, 'action': 'callback', 'callback': _warn_deprecated} if multiprocessing is not None: class ChildLinter(multiprocessing.Process): # pylint: disable=no-member def run(self): tasks_queue, results_queue, self._config = self._args # pylint: disable=no-member self._config["jobs"] = 1 # Child does not parallelize any further. self._python3_porting_mode = self._config.pop( 'python3_porting_mode', None) # Run linter for received files/modules. for file_or_module in iter(tasks_queue.get, 'STOP'): result = self._run_linter(file_or_module[0]) try: results_queue.put(result) except Exception as ex: print("internal error with sending report for module %s" % file_or_module, file=sys.stderr) print(ex, file=sys.stderr) results_queue.put({}) def _run_linter(self, file_or_module): linter = PyLinter() # Register standard checkers. 
linter.load_default_plugins() # Load command line plugins. # TODO linter.load_plugin_modules(self._plugins) linter.load_configuration(**self._config) linter.set_reporter(reporters.CollectingReporter()) # Enable the Python 3 checker mode. This option is # passed down from the parent linter up to here, since # the Python 3 porting flag belongs to the Run class, # instead of the Linter class. if self._python3_porting_mode: linter.python3_porting_mode() # Run the checks. linter.check(file_or_module) msgs = [_get_new_args(m) for m in linter.reporter.messages] return (file_or_module, linter.file_state.base_name, linter.current_name, msgs, linter.stats, linter.msg_status) class PyLinter(configuration.OptionsManagerMixIn, utils.MessagesHandlerMixIn, utils.ReportsHandlerMixIn, checkers.BaseTokenChecker): """lint Python modules using external checkers. This is the main checker controlling the other ones and the reports generation. It is itself both a raw checker and an astroid checker in order to: * handle message activation / deactivation at the module level * handle some basic but necessary stats'data (number of classes, methods...) IDE plugins developpers: you may have to call `astroid.builder.MANAGER.astroid_cache.clear()` accross run if you want to ensure the latest code version is actually checked. """ __implements__ = (interfaces.ITokenChecker, ) name = 'master' priority = 0 level = 0 msgs = MSGS @staticmethod def make_options(): return (('ignore', {'type' : 'csv', 'metavar' : '<file>[,<file>...]', 'dest' : 'black_list', 'default' : ('CVS',), 'help' : 'Add files or directories to the blacklist. 
' 'They should be base names, not paths.'}), ('persistent', {'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>', 'level': 1, 'help' : 'Pickle collected data for later comparisons.'}), ('load-plugins', {'type' : 'csv', 'metavar' : '<modules>', 'default' : (), 'level': 1, 'help' : 'List of plugins (as comma separated values of ' 'python modules names) to load, usually to register ' 'additional checkers.'}), ('output-format', {'default': 'text', 'type': 'string', 'metavar' : '<format>', 'short': 'f', 'group': 'Reports', 'help' : 'Set the output format. Available formats are text,' ' parseable, colorized, msvs (visual studio) and html. You ' 'can also give a reporter class, eg mypackage.mymodule.' 'MyReporterClass.'}), ('files-output', {'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>', 'group': 'Reports', 'level': 1, 'help' : 'Put messages in a separate file for each module / ' 'package specified on the command line instead of printing ' 'them on stdout. Reports (if any) will be written in a file ' 'name "pylint_global.[txt|html]".'}), ('reports', {'default': 1, 'type' : 'yn', 'metavar' : '<y_or_n>', 'short': 'r', 'group': 'Reports', 'help' : 'Tells whether to display a full report or only the ' 'messages'}), ('evaluation', {'type' : 'string', 'metavar' : '<python_expression>', 'group': 'Reports', 'level': 1, 'default': '10.0 - ((float(5 * error + warning + refactor + ' 'convention) / statement) * 10)', 'help' : 'Python expression which should return a note less ' 'than 10 (10 is the highest note). You have access ' 'to the variables errors warning, statement which ' 'respectively contain the number of errors / ' 'warnings messages and the total number of ' 'statements analyzed. This is used by the global ' 'evaluation report (RP0004).'}), ('comment', {'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>', 'group': 'Reports', 'level': 1, 'help' : 'Add a comment according to your evaluation note. 
' 'This is used by the global evaluation report (RP0004).'}), ('confidence', {'type' : 'multiple_choice', 'metavar': '<levels>', 'default': '', 'choices': [c.name for c in interfaces.CONFIDENCE_LEVELS], 'group': 'Messages control', 'help' : 'Only show warnings with the listed confidence levels.' ' Leave empty to show all. Valid levels: %s' % ( ', '.join(c.name for c in interfaces.CONFIDENCE_LEVELS),)}), ('enable', {'type' : 'csv', 'metavar': '<msg ids>', 'short': 'e', 'group': 'Messages control', 'help' : 'Enable the message, report, category or checker with the ' 'given id(s). You can either give multiple identifier ' 'separated by comma (,) or put this option multiple time. ' 'See also the "--disable" option for examples. '}), ('disable', {'type' : 'csv', 'metavar': '<msg ids>', 'short': 'd', 'group': 'Messages control', 'help' : 'Disable the message, report, category or checker ' 'with the given id(s). You can either give multiple identifiers' ' separated by comma (,) or put this option multiple times ' '(only on the command line, not in the configuration file ' 'where it should appear only once).' 'You can also use "--disable=all" to disable everything first ' 'and then reenable specific checks. For example, if you want ' 'to run only the similarities checker, you can use ' '"--disable=all --enable=similarities". ' 'If you want to run only the classes checker, but have no ' 'Warning level messages displayed, use' '"--disable=all --enable=classes --disable=W"'}), ('msg-template', {'type' : 'string', 'metavar': '<template>', 'group': 'Reports', 'help' : ('Template used to display messages. ' 'This is a python new-style format string ' 'used to format the message information. 
' 'See doc for all details') }), ('include-ids', _deprecated_option('i', 'yn')), ('symbols', _deprecated_option('s', 'yn')), ('jobs', {'type' : 'int', 'metavar': '<n-processes>', 'short': 'j', 'default': 1, 'help' : '''Use multiple processes to speed up Pylint.''', }), ('unsafe-load-any-extension', {'type': 'yn', 'metavar': '<yn>', 'default': False, 'hide': True, 'help': ('Allow loading of arbitrary C extensions. Extensions' ' are imported into the active Python interpreter and' ' may run arbitrary code.')}), ('extension-pkg-whitelist', {'type': 'csv', 'metavar': '<pkg[,pkg]>', 'default': [], 'help': ('A comma-separated list of package or module names' ' from where C extensions may be loaded. Extensions are' ' loading into the active Python interpreter and may run' ' arbitrary code')} ), ) option_groups = ( ('Messages control', 'Options controling analysis messages'), ('Reports', 'Options related to output formating and reporting'), ) def __init__(self, options=(), reporter=None, option_groups=(), pylintrc=None): # some stuff has to be done before ancestors initialization... 
# # messages store / checkers / reporter / astroid manager self.msgs_store = utils.MessagesStore() self.reporter = None self._reporter_name = None self._reporters = {} self._checkers = collections.defaultdict(list) self._pragma_lineno = {} self._ignore_file = False # visit variables self.file_state = utils.FileState() self.current_name = None self.current_file = None self.stats = None # init options self._external_opts = options self.options = options + PyLinter.make_options() self.option_groups = option_groups + PyLinter.option_groups self._options_methods = { 'enable': self.enable, 'disable': self.disable} self._bw_options_methods = {'disable-msg': self.disable, 'enable-msg': self.enable} full_version = '%%prog %s, \nastroid %s, common %s\nPython %s' % ( version, astroid_version, common_version, sys.version) configuration.OptionsManagerMixIn.__init__( self, usage=__doc__, version=full_version, config_file=pylintrc or config.PYLINTRC) utils.MessagesHandlerMixIn.__init__(self) utils.ReportsHandlerMixIn.__init__(self) checkers.BaseTokenChecker.__init__(self) # provided reports self.reports = (('RP0001', 'Messages by category', report_total_messages_stats), ('RP0002', '% errors / warnings by module', report_messages_by_module_stats), ('RP0003', 'Messages', report_messages_stats), ('RP0004', 'Global evaluation', self.report_evaluation), ) self.register_checker(self) self._dynamic_plugins = set() self._python3_porting_mode = False self._error_mode = False self.load_provider_defaults() if reporter: self.set_reporter(reporter) def load_default_plugins(self): checkers.initialize(self) reporters.initialize(self) # Make sure to load the default reporter, because # the option has been set before the plugins had been loaded. 
if not self.reporter: self._load_reporter() def load_plugin_modules(self, modnames): """take a list of module names which are pylint plugins and load and register them """ for modname in modnames: if modname in self._dynamic_plugins: continue self._dynamic_plugins.add(modname) module = modutils.load_module_from_name(modname) module.register(self) def _load_reporter(self): name = self._reporter_name.lower() if name in self._reporters: self.set_reporter(self._reporters[name]()) else: qname = self._reporter_name module = modutils.load_module_from_name( modutils.get_module_part(qname)) class_name = qname.split('.')[-1] reporter_class = getattr(module, class_name) self.set_reporter(reporter_class()) def set_reporter(self, reporter): """set the reporter used to display messages and reports""" self.reporter = reporter reporter.linter = self def set_option(self, optname, value, action=None, optdict=None): """overridden from configuration.OptionsProviderMixin to handle some special options """ if optname in self._options_methods or \ optname in self._bw_options_methods: if value: try: meth = self._options_methods[optname] except KeyError: meth = self._bw_options_methods[optname] warnings.warn('%s is deprecated, replace it by %s' % ( optname, optname.split('-')[0]), DeprecationWarning) value = optik_ext.check_csv(None, optname, value) if isinstance(value, (list, tuple)): for _id in value: meth(_id, ignore_unknown=True) else: meth(value) return # no need to call set_option, disable/enable methods do it elif optname == 'output-format': self._reporter_name = value # If the reporters are already available, load # the reporter class. 
if self._reporters: self._load_reporter() try: checkers.BaseTokenChecker.set_option(self, optname, value, action, optdict) except configuration.UnsupportedAction: print('option %s can\'t be read from config file' % \ optname, file=sys.stderr) def register_reporter(self, reporter_class): self._reporters[reporter_class.name] = reporter_class def report_order(self): reports = sorted(self._reports, key=lambda x: getattr(x, 'name', '')) try: # Remove the current reporter and add it # at the end of the list. reports.pop(reports.index(self)) except ValueError: pass else: reports.append(self) return reports # checkers manipulation methods ############################################ def register_checker(self, checker): """register a new checker checker is an object implementing IRawChecker or / and IAstroidChecker """ assert checker.priority <= 0, 'checker priority can\'t be >= 0' self._checkers[checker.name].append(checker) for r_id, r_title, r_cb in checker.reports: self.register_report(r_id, r_title, r_cb, checker) self.register_options_provider(checker) if hasattr(checker, 'msgs'): self.msgs_store.register_messages(checker) checker.load_defaults() # Register the checker, but disable all of its messages. # TODO(cpopa): we should have a better API for this. 
if not getattr(checker, 'enabled', True): self.disable(checker.name) def disable_noerror_messages(self): for msgcat, msgids in six.iteritems(self.msgs_store._msgs_by_category): if msgcat == 'E': for msgid in msgids: self.enable(msgid) else: for msgid in msgids: self.disable(msgid) def disable_reporters(self): """disable all reporters""" for reporters in six.itervalues(self._reports): for report_id, _, _ in reporters: self.disable_report(report_id) def error_mode(self): """error mode: enable only errors; no reports, no persistent""" self._error_mode = True self.disable_noerror_messages() self.disable('miscellaneous') if self._python3_porting_mode: self.disable('all') for msg_id in self._checker_messages('python3'): if msg_id.startswith('E'): self.enable(msg_id) else: self.disable('python3') self.set_option('reports', False) self.set_option('persistent', False) def python3_porting_mode(self): """Disable all other checkers and enable Python 3 warnings.""" self.disable('all') self.enable('python3') if self._error_mode: # The error mode was activated, using the -E flag. # So we'll need to enable only the errors from the # Python 3 porting checker. 
for msg_id in self._checker_messages('python3'): if msg_id.startswith('E'): self.enable(msg_id) else: self.disable(msg_id) self._python3_porting_mode = True # block level option handling ############################################# # # see func_block_disable_msg.py test case for expected behaviour def process_tokens(self, tokens): """process tokens from the current module to search for module/block level options """ control_pragmas = {'disable', 'enable'} for (tok_type, content, start, _, _) in tokens: if tok_type != tokenize.COMMENT: continue match = utils.OPTION_RGX.search(content) if match is None: continue if match.group(1).strip() == "disable-all" or \ match.group(1).strip() == 'skip-file': if match.group(1).strip() == "disable-all": self.add_message('deprecated-pragma', line=start[0], args=('disable-all', 'skip-file')) self.add_message('file-ignored', line=start[0]) self._ignore_file = True return try: opt, value = match.group(1).split('=', 1) except ValueError: self.add_message('bad-inline-option', args=match.group(1).strip(), line=start[0]) continue opt = opt.strip() if opt in self._options_methods or opt in self._bw_options_methods: try: meth = self._options_methods[opt] except KeyError: meth = self._bw_options_methods[opt] # found a "(dis|en)able-msg" pragma deprecated suppresssion self.add_message('deprecated-pragma', line=start[0], args=(opt, opt.replace('-msg', ''))) for msgid in textutils.splitstrip(value): # Add the line where a control pragma was encountered. 
if opt in control_pragmas: self._pragma_lineno[msgid] = start[0] try: if (opt, msgid) == ('disable', 'all'): self.add_message('deprecated-pragma', line=start[0], args=('disable=all', 'skip-file')) self.add_message('file-ignored', line=start[0]) self._ignore_file = True return meth(msgid, 'module', start[0]) except utils.UnknownMessage: self.add_message('bad-option-value', args=msgid, line=start[0]) else: self.add_message('unrecognized-inline-option', args=opt, line=start[0]) # code checking methods ################################################### def get_checkers(self): """return all available checkers as a list""" return [self] + [c for checkers in six.itervalues(self._checkers) for c in checkers if c is not self] def prepare_checkers(self): """return checkers needed for activated messages and reports""" if not self.config.reports: self.disable_reporters() # get needed checkers neededcheckers = [self] for checker in self.get_checkers()[1:]: # fatal errors should not trigger enable / disabling a checker messages = set(msg for msg in checker.msgs if msg[0] != 'F' and self.is_message_enabled(msg)) if (messages or any(self.report_is_enabled(r[0]) for r in checker.reports)): neededcheckers.append(checker) # Sort checkers by priority neededcheckers = sorted(neededcheckers, key=operator.attrgetter('priority'), reverse=True) return neededcheckers def should_analyze_file(self, modname, path): # pylint: disable=unused-argument, no-self-use """Returns whether or not a module should be checked. This implementation returns True for all python source file, indicating that all files should be linted. Subclasses may override this method to indicate that modules satisfying certain conditions should not be linted. :param str modname: The name of the module to be checked. :param str path: The full path to the source code of the module. :returns: True if the module should be checked. 
:rtype: bool """ return path.endswith('.py') def check(self, files_or_modules): """main checking entry: check a list of files or modules from their name. """ # initialize msgs_state now that all messages have been registered into # the store for msg in self.msgs_store.messages: if not msg.may_be_emitted(): self._msgs_state[msg.msgid] = False if not isinstance(files_or_modules, (list, tuple)): files_or_modules = (files_or_modules,) if self.config.jobs == 1: self._do_check(files_or_modules) else: with _patch_sysmodules(): self._parallel_check(files_or_modules) def _parallel_task(self, files_or_modules): # Prepare configuration for child linters. filter_options = {'symbols', 'include-ids', 'long-help'} filter_options.update([opt_name for opt_name, _ in self._external_opts]) config = {} for opt_providers in six.itervalues(self._all_options): for optname, optdict, val in opt_providers.options_and_values(): if optname not in filter_options: config[optname] = configuration.format_option_value(optdict, val) config['python3_porting_mode'] = self._python3_porting_mode childs = [] manager = multiprocessing.Manager() # pylint: disable=no-member tasks_queue = manager.Queue() # pylint: disable=no-member results_queue = manager.Queue() # pylint: disable=no-member for _ in range(self.config.jobs): cl = ChildLinter(args=(tasks_queue, results_queue, config)) cl.start() # pylint: disable=no-member childs.append(cl) # send files to child linters for files_or_module in files_or_modules: tasks_queue.put([files_or_module]) # collect results from child linters failed = False for _ in files_or_modules: try: result = results_queue.get() except Exception as ex: print("internal error while receiving results from child linter", file=sys.stderr) print(ex, file=sys.stderr) failed = True break yield result # Stop child linters and wait for their completion. 
for _ in range(self.config.jobs): tasks_queue.put('STOP') for cl in childs: cl.join() if failed: print("Error occured, stopping the linter.", file=sys.stderr) sys.exit(32) def _parallel_check(self, files_or_modules): # Reset stats. self.open() all_stats = [] for result in self._parallel_task(files_or_modules): ( file_or_module, self.file_state.base_name, module, messages, stats, msg_status ) = result if file_or_module == files_or_modules[-1]: last_module = module for msg in messages: msg = utils.Message(*msg) self.set_current_module(module) self.reporter.handle_message(msg) all_stats.append(stats) self.msg_status |= msg_status self.stats = _merge_stats(itertools.chain(all_stats, [self.stats])) self.current_name = last_module # Insert stats data to local checkers. for checker in self.get_checkers(): if checker is not self: checker.stats = self.stats def _do_check(self, files_or_modules): walker = utils.PyLintASTWalker(self) checkers = self.prepare_checkers() tokencheckers = [c for c in checkers if interface.implements(c, interfaces.ITokenChecker) and c is not self] rawcheckers = [c for c in checkers if interface.implements(c, interfaces.IRawChecker)] # notify global begin for checker in checkers: checker.open() if interface.implements(checker, interfaces.IAstroidChecker): walker.add_checker(checker) # build ast and check modules or packages for descr in self.expand_files(files_or_modules): modname, filepath = descr['name'], descr['path'] if not descr['isarg'] and not self.should_analyze_file(modname, filepath): continue if self.config.files_output: reportfile = 'pylint_%s.%s' % (modname, self.reporter.extension) self.reporter.set_output(open(reportfile, 'w')) self.set_current_module(modname, filepath) # get the module representation ast_node = self.get_ast(filepath, modname) if ast_node is None: continue # XXX to be correct we need to keep module_msgs_state for every # analyzed module (the problem stands with localized messages which # are only detected in the 
.close step) self.file_state = utils.FileState(descr['basename']) self._ignore_file = False # fix the current file (if the source file was not available or # if it's actually a c extension) self.current_file = ast_node.file # pylint: disable=maybe-no-member self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers) # warn about spurious inline messages handling spurious_messages = self.file_state.iter_spurious_suppression_messages(self.msgs_store) for msgid, line, args in spurious_messages: self.add_message(msgid, line, None, args) # notify global end self.stats['statement'] = walker.nbstatements checkers.reverse() for checker in checkers: checker.close() def expand_files(self, modules): """get modules and errors from a list of modules and handle errors """ result, errors = utils.expand_modules(modules, self.config.black_list) for error in errors: message = modname = error["mod"] key = error["key"] self.set_current_module(modname) if key == "fatal": message = str(error["ex"]).replace(os.getcwd() + os.sep, '') self.add_message(key, args=message) return result def set_current_module(self, modname, filepath=None): """set the name of the currently analyzed module and init statistics for it """ if not modname and filepath is None: return self.reporter.on_set_current_module(modname, filepath) self.current_name = modname self.current_file = filepath or modname self.stats['by_module'][modname] = {} self.stats['by_module'][modname]['statement'] = 0 for msg_cat in six.itervalues(utils.MSG_TYPES): self.stats['by_module'][modname][msg_cat] = 0 def get_ast(self, filepath, modname): """return a ast(roid) representation for a module""" try: return MANAGER.ast_from_file(filepath, modname, source=True) except SyntaxError as ex: self.add_message('syntax-error', line=ex.lineno, args=ex.msg) except astroid.AstroidBuildingException as ex: self.add_message('parse-error', args=ex) except Exception as ex: # pylint: disable=broad-except import traceback traceback.print_exc() 
self.add_message('astroid-error', args=(ex.__class__, ex)) def check_astroid_module(self, ast_node, walker, rawcheckers, tokencheckers): """Check a module from its astroid representation.""" try: tokens = utils.tokenize_module(ast_node) except tokenize.TokenError as ex: self.add_message('syntax-error', line=ex.args[1][0], args=ex.args[0]) return if not ast_node.pure_python: self.add_message('raw-checker-failed', args=ast_node.name) else: #assert astroid.file.endswith('.py') # invoke ITokenChecker interface on self to fetch module/block # level options self.process_tokens(tokens) if self._ignore_file: return False # walk ast to collect line numbers self.file_state.collect_block_lines(self.msgs_store, ast_node) # run raw and tokens checkers for checker in rawcheckers: checker.process_module(ast_node) for checker in tokencheckers: checker.process_tokens(tokens) # generate events to astroid checkers walker.walk(ast_node) return True # IAstroidChecker interface ################################################# def open(self): """initialize counters""" self.stats = {'by_module' : {}, 'by_msg' : {}, } MANAGER.always_load_extensions = self.config.unsafe_load_any_extension MANAGER.extension_package_whitelist.update( self.config.extension_pkg_whitelist) for msg_cat in six.itervalues(utils.MSG_TYPES): self.stats[msg_cat] = 0 def generate_reports(self): """close the whole package /module, it's time to make reports ! if persistent run, pickle results for later comparison """ if self.file_state.base_name is not None: # load previous results if any previous_stats = config.load_results(self.file_state.base_name) # XXX code below needs refactoring to be more reporter agnostic self.reporter.on_close(self.stats, previous_stats) if self.config.reports: sect = self.make_reports(self.stats, previous_stats) if self.config.files_output: filename = 'pylint_global.' 
+ self.reporter.extension self.reporter.set_output(open(filename, 'w')) else: sect = ureports.Section() if self.config.reports or self.config.output_format == 'html': self.reporter.display_results(sect) # save results if persistent run if self.config.persistent: config.save_results(self.stats, self.file_state.base_name) else: if self.config.output_format == 'html': # No output will be emitted for the html # reporter if the file doesn't exist, so emit # the results here. self.reporter.display_results(ureports.Section()) self.reporter.on_close(self.stats, {}) # specific reports ######################################################## def report_evaluation(self, sect, stats, previous_stats): """make the global evaluation report""" # check with at least check 1 statements (usually 0 when there is a # syntax error preventing pylint from further processing) if stats['statement'] == 0: raise utils.EmptyReport() # get a global note for the code evaluation = self.config.evaluation try: note = eval(evaluation, {}, self.stats) # pylint: disable=eval-used except Exception as ex: # pylint: disable=broad-except msg = 'An exception occurred while rating: %s' % ex else: stats['global_note'] = note msg = 'Your code has been rated at %.2f/10' % note pnote = previous_stats.get('global_note') if pnote is not None: msg += ' (previous run: %.2f/10, %+.2f)' % (pnote, note - pnote) if self.config.comment: msg = '%s\n%s' % (msg, config.get_note_message(note)) sect.append(ureports.Text(msg)) # some reporting functions #################################################### def report_total_messages_stats(sect, stats, previous_stats): """make total errors / warnings report""" lines = ['type', 'number', 'previous', 'difference'] lines += checkers.table_lines_from_stats(stats, previous_stats, ('convention', 'refactor', 'warning', 'error')) sect.append(ureports.Table(children=lines, cols=4, rheaders=1)) def report_messages_stats(sect, stats, _): """make messages type report""" if not 
stats['by_msg']: # don't print this report when we didn't detected any errors raise utils.EmptyReport() in_order = sorted([(value, msg_id) for msg_id, value in six.iteritems(stats['by_msg']) if not msg_id.startswith('I')]) in_order.reverse() lines = ('message id', 'occurrences') for value, msg_id in in_order: lines += (msg_id, str(value)) sect.append(ureports.Table(children=lines, cols=2, rheaders=1)) def report_messages_by_module_stats(sect, stats, _): """make errors / warnings by modules report""" if len(stats['by_module']) == 1: # don't print this report when we are analysing a single module raise utils.EmptyReport() by_mod = collections.defaultdict(dict) for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'): total = stats[m_type] for module in six.iterkeys(stats['by_module']): mod_total = stats['by_module'][module][m_type] if total == 0: percent = 0 else: percent = float((mod_total)*100) / total by_mod[module][m_type] = percent sorted_result = [] for module, mod_info in six.iteritems(by_mod): sorted_result.append((mod_info['error'], mod_info['warning'], mod_info['refactor'], mod_info['convention'], module)) sorted_result.sort() sorted_result.reverse() lines = ['module', 'error', 'warning', 'refactor', 'convention'] for line in sorted_result: # Don't report clean modules. 
if all(entry == 0 for entry in line[:-1]): continue lines.append(line[-1]) for val in line[:-1]: lines.append('%.2f' % val) if len(lines) == 5: raise utils.EmptyReport() sect.append(ureports.Table(children=lines, cols=5, rheaders=1)) # utilities ################################################################### class ArgumentPreprocessingError(Exception): """Raised if an error occurs during argument preprocessing.""" def preprocess_options(args, search_for): """look for some options (keys of <search_for>) which have to be processed before others values of <search_for> are callback functions to call when the option is found """ i = 0 while i < len(args): arg = args[i] if arg.startswith('--'): try: option, val = arg[2:].split('=', 1) except ValueError: option, val = arg[2:], None try: cb, takearg = search_for[option] except KeyError: i += 1 else: del args[i] if takearg and val is None: if i >= len(args) or args[i].startswith('-'): msg = 'Option %s expects a value' % option raise ArgumentPreprocessingError(msg) val = args[i] del args[i] elif not takearg and val is not None: msg = "Option %s doesn't expects a value" % option raise ArgumentPreprocessingError(msg) cb(option, val) else: i += 1 @contextlib.contextmanager def fix_import_path(args): """Prepare sys.path for running the linter checks. Within this context, each of the given arguments is importable. Paths are added to sys.path in corresponding order to the arguments. We avoid adding duplicate directories to sys.path. `sys.path` is reset to its original value upon exitign this context. """ orig = list(sys.path) changes = [] for arg in args: path = _get_python_path(arg) if path in changes: continue else: changes.append(path) sys.path[:] = changes + sys.path try: yield finally: sys.path[:] = orig class Run(object): """helper class to use as main for pylint : run(*sys.argv[1:]) """ LinterClass = PyLinter option_groups = ( ('Commands', 'Options which are actually commands. 
Options in this \ group are mutually exclusive.'), ) def __init__(self, args, reporter=None, exit=True): self._rcfile = None self._plugins = [] try: preprocess_options(args, { # option: (callback, takearg) 'init-hook': (cb_init_hook, True), 'rcfile': (self.cb_set_rcfile, True), 'load-plugins': (self.cb_add_plugins, True), }) except ArgumentPreprocessingError as ex: print(ex, file=sys.stderr) sys.exit(32) self.linter = linter = self.LinterClass(( ('rcfile', {'action' : 'callback', 'callback' : lambda *args: 1, 'type': 'string', 'metavar': '<file>', 'help' : 'Specify a configuration file.'}), ('init-hook', {'action' : 'callback', 'callback' : lambda *args: 1, 'type' : 'string', 'metavar': '<code>', 'level': 1, 'help' : 'Python code to execute, usually for sys.path ' 'manipulation such as pygtk.require().'}), ('help-msg', {'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>', 'callback' : self.cb_help_message, 'group': 'Commands', 'help' : 'Display a help message for the given message id and ' 'exit. The value may be a comma separated list of message ids.'}), ('list-msgs', {'action' : 'callback', 'metavar': '<msg-id>', 'callback' : self.cb_list_messages, 'group': 'Commands', 'level': 1, 'help' : "Generate pylint's messages."}), ('list-conf-levels', {'action' : 'callback', 'callback' : cb_list_confidence_levels, 'group': 'Commands', 'level': 1, 'help' : "Generate pylint's messages."}), ('full-documentation', {'action' : 'callback', 'metavar': '<msg-id>', 'callback' : self.cb_full_documentation, 'group': 'Commands', 'level': 1, 'help' : "Generate pylint's full documentation."}), ('generate-rcfile', {'action' : 'callback', 'callback' : self.cb_generate_config, 'group': 'Commands', 'help' : 'Generate a sample configuration file according to ' 'the current configuration. 
You can put other options ' 'before this one to get them in the generated ' 'configuration.'}), ('generate-man', {'action' : 'callback', 'callback' : self.cb_generate_manpage, 'group': 'Commands', 'help' : "Generate pylint's man page.", 'hide': True}), ('errors-only', {'action' : 'callback', 'callback' : self.cb_error_mode, 'short': 'E', 'help' : 'In error mode, checkers without error messages are ' 'disabled and for others, only the ERROR messages are ' 'displayed, and no reports are done by default'''}), ('py3k', {'action' : 'callback', 'callback' : self.cb_python3_porting_mode, 'help' : 'In Python 3 porting mode, all checkers will be ' 'disabled and only messages emitted by the porting ' 'checker will be displayed'}), ('profile', {'type' : 'yn', 'metavar' : '<y_or_n>', 'default': False, 'hide': True, 'help' : 'Profiled execution.'}), ), option_groups=self.option_groups, pylintrc=self._rcfile) # register standard checkers linter.load_default_plugins() # load command line plugins linter.load_plugin_modules(self._plugins) # add some help section linter.add_help_section('Environment variables', config.ENV_HELP, level=1) # pylint: disable=bad-continuation linter.add_help_section('Output', 'Using the default text output, the message format is : \n' ' \n' ' MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n' ' \n' 'There are 5 kind of message types : \n' ' * (C) convention, for programming standard violation \n' ' * (R) refactor, for bad code smell \n' ' * (W) warning, for python specific problems \n' ' * (E) error, for probable bugs in the code \n' ' * (F) fatal, if an error occurred which prevented pylint from doing further\n' 'processing.\n' , level=1) linter.add_help_section('Output status code', 'Pylint should leave with following status code: \n' ' * 0 if everything went fine \n' ' * 1 if a fatal message was issued \n' ' * 2 if an error message was issued \n' ' * 4 if a warning message was issued \n' ' * 8 if a refactor message was issued \n' ' * 16 if a convention 
message was issued \n' ' * 32 on usage error \n' ' \n' 'status 1 to 16 will be bit-ORed so you can know which different categories has\n' 'been issued by analysing pylint output status code\n', level=1) # read configuration linter.disable('pointless-except') linter.disable('suppressed-message') linter.disable('useless-suppression') linter.read_config_file() config_parser = linter.cfgfile_parser # run init hook, if present, before loading plugins if config_parser.has_option('MASTER', 'init-hook'): cb_init_hook('init-hook', textutils.unquote(config_parser.get('MASTER', 'init-hook'))) # is there some additional plugins in the file configuration, in if config_parser.has_option('MASTER', 'load-plugins'): plugins = textutils.splitstrip( config_parser.get('MASTER', 'load-plugins')) linter.load_plugin_modules(plugins) # now we can load file config and command line, plugins (which can # provide options) have been registered linter.load_config_file() if reporter: # if a custom reporter is provided as argument, it may be overridden # by file parameters, so re-set it here, but before command line # parsing so it's still overrideable by command line option linter.set_reporter(reporter) try: args = linter.load_command_line_configuration(args) except SystemExit as exc: if exc.code == 2: # bad options exc.code = 32 raise if not args: print(linter.help()) sys.exit(32) if linter.config.jobs < 0: print("Jobs number (%d) should be greater than 0" % linter.config.jobs, file=sys.stderr) sys.exit(32) if linter.config.jobs > 1 or linter.config.jobs == 0: if multiprocessing is None: print("Multiprocessing library is missing, " "fallback to single process", file=sys.stderr) linter.set_option("jobs", 1) else: if linter.config.jobs == 0: linter.config.jobs = multiprocessing.cpu_count() # insert current working directory to the python path to have a correct # behaviour with fix_import_path(args): if self.linter.config.profile: print('** profiled run', file=sys.stderr) import cProfile, pstats 
cProfile.runctx('linter.check(%r)' % args, globals(), locals(), 'stones.prof') data = pstats.Stats('stones.prof') data.strip_dirs() data.sort_stats('time', 'calls') data.print_stats(30) else: linter.check(args) linter.generate_reports() if exit: sys.exit(self.linter.msg_status) def cb_set_rcfile(self, name, value): """callback for option preprocessing (i.e. before option parsing)""" self._rcfile = value def cb_add_plugins(self, name, value): """callback for option preprocessing (i.e. before option parsing)""" self._plugins.extend(textutils.splitstrip(value)) def cb_error_mode(self, *args, **kwargs): """error mode: * disable all but error messages * disable the 'miscellaneous' checker which can be safely deactivated in debug * disable reports * do not save execution information """ self.linter.error_mode() def cb_generate_config(self, *args, **kwargs): """optik callback for sample config file generation""" self.linter.generate_config(skipsections=('COMMANDS',)) sys.exit(0) def cb_generate_manpage(self, *args, **kwargs): """optik callback for sample config file generation""" from pylint import __pkginfo__ self.linter.generate_manpage(__pkginfo__) sys.exit(0) def cb_help_message(self, option, optname, value, parser): """optik callback for printing some help about a particular message""" self.linter.msgs_store.help_message(textutils.splitstrip(value)) sys.exit(0) def cb_full_documentation(self, option, optname, value, parser): """optik callback for printing full documentation""" self.linter.print_full_documentation() sys.exit(0) def cb_list_messages(self, option, optname, value, parser): # FIXME """optik callback for printing available messages""" self.linter.msgs_store.list_messages() sys.exit(0) def cb_python3_porting_mode(self, *args, **kwargs): """Activate only the python3 porting checker.""" self.linter.python3_porting_mode() def cb_list_confidence_levels(option, optname, value, parser): for level in interfaces.CONFIDENCE_LEVELS: print('%-18s: %s' % level) 
sys.exit(0) def cb_init_hook(optname, value): """exec arbitrary code to set sys.path for instance""" exec(value) # pylint: disable=exec-used if __name__ == '__main__': Run(sys.argv[1:])
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/pylint/lint.py
Python
agpl-3.0
58,101
[ "VisIt" ]
d0edc055500d8731999e4f501dd90561da3ca9a0efd8f677ee48fba877cb789c
#!/usr/bin/env python """ list FileCatalog file or directory """ import os from COMDIRAC.Interfaces import critical from COMDIRAC.Interfaces import DSession from COMDIRAC.Interfaces import createCatalog from COMDIRAC.Interfaces import pathFromArguments if __name__ == "__main__": import sys from DIRAC.Core.Base import Script class Params: def __init__ ( self ): self.long = False self.replicas = False self.time = False def setLong( self, arg = None ): self.long = True def getLong( self ): return self.long def setReplicas( self, arg = None ): self.replicas = True def getReplicas( self ): return self.replicas def setTime( self, arg = None ): self.time = True def getTime( self ): return self.time params = Params( ) Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1], 'Usage:', ' %s [options] [path]' % Script.scriptName, 'Arguments:', ' path: file/directory path', '', 'Examples:', ' $ dls', ' $ dls ..', ' $ dls /', ] ) ) Script.registerSwitch( "l", "long", "detailled listing", params.setLong ) Script.registerSwitch( "L", "list-replicas", "detailled listing with replicas", params.setReplicas ) Script.registerSwitch( "t", "time", "time based order", params.setTime ) Script.parseCommandLine( ignoreErrors = True ) args = Script.getPositionalArgs() from DIRAC.DataManagementSystem.Client.FileCatalogClientCLI import DirectoryListing, FileCatalogClientCLI class ReplicaDirectoryListing( DirectoryListing ): def addFileWithReplicas( self,name,fileDict,numericid, replicas ): """ Pretty print of the file ls output with replica info """ self.addFile( name, fileDict, replicas, numericid ) self.entries[ -1 ] += tuple( replicas ) def printListing( self,reverse,timeorder ): """ """ if timeorder: if reverse: self.entries.sort( key=lambda x: x[ 5 ] ) else: self.entries.sort( key=lambda x: x[ 5 ],reverse=True ) else: if reverse: self.entries.sort( key=lambda x: x[ 6 ],reverse=True ) else: self.entries.sort( key=lambda x: x[ 6 ] ) # Determine the field widths wList = [ 0 for x in 
range( 7 ) ] for d in self.entries: for i in range( 7 ): if len( str( d[ i ] )) > wList[ i ]: wList[ i ] = len( str( d[ i ] )) for e in self.entries: print str( e[ 0 ] ), print str( e[ 1 ] ).rjust( wList[ 1 ] ), print str( e[ 2 ] ).ljust( wList[ 2 ] ), print str( e[ 3 ] ).ljust( wList[ 3 ] ), print str( e[ 4 ] ).rjust( wList[ 2 ] ), print str( e[ 5 ] ).rjust( wList[ 3 ] ), print str( e[ 6 ] ) # print replicas if present if len( e ) > 7: for r in e[ 7: ]: print " ", r class ReplicaFileCatalogClientCLI( FileCatalogClientCLI ): def getReplicas( self, path ): replicas = [ ] try: result = self.fc.getReplicas( path ) if result[ 'OK' ]: if result[ 'Value' ][ 'Successful' ]: for se,entry in result[ 'Value' ][ 'Successful' ][ path ].items( ): replicas.append( se.ljust( 15 ) + " " + entry ) else: print "Replicas: ", result#[ 'Message' ] except Exception, x: replicas.append( "replicas failed:" + str( x )) return tuple( replicas ) def do_ls( self, args ): """ Lists directory entries at <path> usage: ls [ -ltrn ] <path> """ argss = args.split( ) # Get switches long = False reverse = False timeorder = False numericid = False path = self.cwd if len( argss ) > 0: if argss[ 0 ][ 0 ] == '-': if 'l' in argss[ 0 ]: long = True if 'r' in argss[ 0 ]: reverse = True if 't' in argss[ 0 ]: timeorder = True if 'n' in argss[ 0 ]: numericid = True del argss[ 0 ] # Get path if argss: path = argss[ 0 ] if path[ 0 ] != '/': path = self.cwd+'/'+path path = path.replace( r'//','/' ) # remove last character if it is "/" if path[ -1 ] == '/' and path != '/': path = path[ :-1 ] # Check if the target path is a file result = self.fc.isFile( path ) if not result[ 'OK' ]: print "Error: can not verify path" return elif path in result[ 'Value' ][ 'Successful' ] and result[ 'Value' ][ 'Successful' ][ path ]: result = self.fc.getFileMetadata( path ) dList = ReplicaDirectoryListing( ) fileDict = result[ 'Value' ][ 'Successful' ][ path ] replicas = self.getReplicas( path ) dList.addFileWithReplicas( 
os.path.basename( path ),fileDict,numericid, replicas ) dList.printListing( reverse,timeorder ) return result = self.fc.isDirectory( path ) if not result[ "OK" ]: print "Error: can not verify path" return elif path not in result[ 'Value' ][ 'Successful' ] or not result[ 'Value' ][ 'Successful' ][ path ]: print "Error: \"%s\" doesn't exist" % path return # Get directory contents now try: result = self.fc.listDirectory( path,long ) dList = ReplicaDirectoryListing( ) if result[ 'OK' ]: if result[ 'Value' ][ 'Successful' ]: for entry in result[ 'Value' ][ 'Successful' ][ path ][ 'Files' ]: fname = entry.split( '/' )[ -1 ] # print entry, fname # fname = entry.replace( self.cwd,'' ).replace( '/','' ) if long: fileDict = result[ 'Value' ][ 'Successful' ][ path ][ 'Files' ][ entry ][ 'MetaData' ] if fileDict: replicas = self.getReplicas( os.path.join( path, fname )) dList.addFileWithReplicas( fname,fileDict,numericid, replicas ) else: dList.addSimpleFile( fname ) for entry in result[ 'Value' ][ 'Successful' ][ path ][ 'SubDirs' ]: dname = entry.split( '/' )[ -1 ] # print entry, dname # dname = entry.replace( self.cwd,'' ).replace( '/','' ) if long: dirDict = result[ 'Value' ][ 'Successful' ][ path ][ 'SubDirs' ][ entry ] if dirDict: dList.addDirectory( dname,dirDict,numericid ) else: dList.addSimpleFile( dname ) for entry in result[ 'Value' ][ 'Successful' ][ path ][ 'Links' ]: pass if long: dList.printListing( reverse,timeorder ) else: dList.printOrdered( ) else: print "Error:",result[ 'Message' ] except Exception, x: print "Error:", str( x ) session = DSession( ) Script.enableCS( ) fccli = None if params.getReplicas( ): fccli = ReplicaFileCatalogClientCLI( createCatalog( ) ) params.setLong( None ) else: fccli = FileCatalogClientCLI( createCatalog( ) ) optstr = "" if params.long: optstr += "l" if params.time: optstr += "t" if optstr: optstr = "-" + optstr + " " for p in pathFromArguments( session, args ): print "%s:" % p fccli.do_ls( optstr + p )
pigay/COMDIRAC
Interfaces/scripts/dls.py
Python
gpl-3.0
7,966
[ "DIRAC" ]
b7516eb1b144baa390626ce2c545be349b8c31630e9b1b975912c2f71fdc0eff
""" Tools for the instructor dashboard """ import dateutil import json from django.conf import settings from django.contrib.auth.models import User from django.http import HttpResponseBadRequest from django.utils.timezone import utc from django.utils.translation import ugettext as _ from courseware.models import StudentFieldOverride from courseware.field_overrides import disable_overrides from courseware.student_field_overrides import ( clear_override_for_user, get_override_for_user, override_field_for_user, ) from xmodule.fields import Date from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from opaque_keys.edx.keys import UsageKey DATE_FIELD = Date() class DashboardError(Exception): """ Errors arising from use of the instructor dashboard. """ def response(self): """ Generate an instance of HttpResponseBadRequest for this error. """ error = unicode(self) return HttpResponseBadRequest(json.dumps({'error': error})) def handle_dashboard_error(view): """ Decorator which adds seamless DashboardError handling to a view. If a DashboardError is raised during view processing, an HttpResponseBadRequest is sent back to the client with JSON data about the error. """ def wrapper(request, course_id): """ Wrap the view. """ try: return view(request, course_id=course_id) except DashboardError, error: return error.response() return wrapper def strip_if_string(value): if isinstance(value, basestring): return value.strip() return value def get_student_from_identifier(unique_student_identifier): """ Gets a student object using either an email address or username. Returns the student object associated with `unique_student_identifier` Raises User.DoesNotExist if no user object can be found. 
""" unique_student_identifier = strip_if_string(unique_student_identifier) if "@" in unique_student_identifier: student = User.objects.get(email=unique_student_identifier) else: student = User.objects.get(username=unique_student_identifier) return student def require_student_from_identifier(unique_student_identifier): """ Same as get_student_from_identifier() but will raise a DashboardError if the student does not exist. """ try: return get_student_from_identifier(unique_student_identifier) except User.DoesNotExist: raise DashboardError( _("Could not find student matching identifier: {student_identifier}").format( student_identifier=unique_student_identifier ) ) def parse_datetime(datestr): """ Convert user input date string into an instance of `datetime.datetime` in UTC. """ try: return dateutil.parser.parse(datestr).replace(tzinfo=utc) except ValueError: raise DashboardError(_("Unable to parse date: ") + datestr) def find_unit(course, url): """ Finds the unit (block, module, whatever the terminology is) with the given url in the course tree and returns the unit. Raises DashboardError if no unit is found. """ def find(node, url): """ Find node in course tree for url. """ if node.location.to_deprecated_string() == url: return node for child in node.get_children(): found = find(child, url) if found: return found return None unit = find(course, url) if unit is None: raise DashboardError(_("Couldn't find module for url: {0}").format(url)) return unit def get_units_with_due_date(course): """ Returns all top level units which have due dates. Does not return descendents of those nodes. """ units = [] def visit(node): """ Visit a node. Checks to see if node has a due date and appends to `units` if it does. Otherwise recurses into children to search for nodes with due dates. 
""" if getattr(node, 'due', None): units.append(node) else: for child in node.get_children(): visit(child) visit(course) #units.sort(key=_title_or_url) return units def title_or_url(node): """ Returns the `display_name` attribute of the passed in node of the course tree, if it has one. Otherwise returns the node's url. """ title = getattr(node, 'display_name', None) if not title: title = node.location.to_deprecated_string() return title def set_due_date_extension(course, unit, student, due_date): """ Sets a due date extension. Raises DashboardError if the unit or extended due date is invalid. """ if due_date: # Check that the new due date is valid: with disable_overrides(): original_due_date = getattr(unit, 'due', None) if not original_due_date: raise DashboardError(_("Unit {0} has no due date to extend.").format(unit.location)) if due_date < original_due_date: raise DashboardError(_("An extended due date must be later than the original due date.")) override_field_for_user(student, unit, 'due', due_date) else: # We are deleting a due date extension. Check that it exists: if not get_override_for_user(student, unit, 'due'): raise DashboardError(_("No due date extension is set for that student and unit.")) clear_override_for_user(student, unit, 'due') def dump_module_extensions(course, unit): """ Dumps data about students with due date extensions for a particular module, specified by 'url', in a particular course. 
""" data = [] header = [_("Username"), _("Full Name"), _("Extended Due Date")] query = StudentFieldOverride.objects.filter( course_id=course.id, location=unit.location, field='due') for override in query: due = DATE_FIELD.from_json(json.loads(override.value)) due = due.strftime("%Y-%m-%d %H:%M") fullname = override.student.profile.name data.append(dict(zip( header, (override.student.username, fullname, due)))) data.sort(key=lambda x: x[header[0]]) return { "header": header, "title": _("Users with due date extensions for {0}").format( title_or_url(unit)), "data": data } def dump_student_extensions(course, student): """ Dumps data about the due date extensions granted for a particular student in a particular course. """ data = [] header = [_("Unit"), _("Extended Due Date")] units = get_units_with_due_date(course) units = {u.location: u for u in units} query = StudentFieldOverride.objects.filter( course_id=course.id, student=student, field='due') for override in query: location = override.location.replace(course_key=course.id) if location not in units: continue due = DATE_FIELD.from_json(json.loads(override.value)) due = due.strftime("%Y-%m-%d %H:%M") title = title_or_url(units[location]) data.append(dict(zip(header, (title, due)))) return { "header": header, "title": _("Due date extensions for {0} {1} ({2})").format( student.first_name, student.last_name, student.username), "data": data} def add_block_ids(payload): """ rather than manually parsing block_ids from module_ids on the client, pass the block_ids explicitly in the payload """ if 'data' in payload: for ele in payload['data']: if 'module_id' in ele: ele['block_id'] = UsageKey.from_string(ele['module_id']).block_id
ampax/edx-platform
lms/djangoapps/instructor/views/tools.py
Python
agpl-3.0
7,903
[ "VisIt" ]
6bfcff3d06fc45c04eefa09b6dbc2fe0c146acdb2221cd35b09a346be18c269b
#!/usr/bin/env python """ Fit parameters of classical interatomic potentials using a metaheuristic algorithm. Because of computational efficiency, optimization of a lot of parameters using gradient-based approaches is out of focus in this program, and that should be performed in a Fortran program. Usage: {0:s} [options] Options: -h, --help Show this message and exit. --nproc NPROC Number of processes to be used. If it's less than 1, use as many processes as possible. [default: 0] --subdir-prefix PREFIX Prefix for pmd directory. [default: subdir_] --subjob-script SCRIPT Name of script that performs MD and post-processing. [default: subjob.sh] """ from __future__ import print_function import os import sys import shutil from docopt import docopt import numpy as np from numpy import sin,cos,sqrt import subprocess import time from datetime import datetime from nappy.fitpot.fp2prms import fp2BVSx, fp2BVS, fp2Morse, read_params_Coulomb, fp2params from nappy.fitpot.de import DE from nappy.fitpot.cs import CS from nappy.fitpot.tpe import TPE __author__ = "RYO KOBAYASHI" __version__ = "rev211111" def read_in_fitpot(fname='in.fitpot'): #...initialize infp = {} infp['rdf_match'] = True infp['adf_match'] = True infp['vol_match'] = True infp['lat_match'] = False infp['fval_upper_limit'] = 100.0 infp['missing_value'] = 1.0 infp['print_level'] = 1 infp['weights'] = {'rdf':1.0, 'adf':1.0, 'vol':1.0, 'lat':1.0} infp['update_vrange'] = -1 infp['param_file'] = 'in.vars.fitpot' mode = None specorder = None infp['interactions'] = [] infp['rdf_pairs'] = [] infp['adf_triplets'] = [] infp['match'] = [] infp['param_files'] = [] with open(fname,'r') as f: lines = f.readlines() for line in lines: if line[0] in ('!','#'): mode = None continue data = line.split() if len(data) == 0: mode = None continue if data[0] == 'num_iteration': maxiter = int(data[1]) infp['num_iteration'] = maxiter mode = None elif data[0] == 'print_level': print_level = int(data[1]) infp['print_level'] = print_level mode 
= None elif data[0] == 'fitting_method': fit_method = data[1] infp['fitting_method'] = fit_method mode = None elif data[0] == 'sample_directory': sampledir = data[1] if '"' in sampledir: sampledir = sampledir.replace('"','') elif "'" in sampledir: sampledir = sampledir.replace("'",'') infp['sample_directory'] = sampledir mode = None # elif data[0] == 'param_file': # prmfile = data[1] # infp['param_file'] = prmfile # mode = None elif data[0] == 'param_files': infp[data[0]] = [ name for name in data[1:] ] mode = None elif data[0] == 'potential': potential = data[1] infp['potential'] = potential mode = None elif data[0] == 'fval_upper_limit': fup_limit = float(data[1]) infp['fval_upper_limit'] = fup_limit mode = None elif data[0] == 'missing_value': misval = float(data[1]) infp['missing_value'] = misval mode = None elif data[0] == 'specorder': specorder = data[1:] infp['specorder'] = specorder mode = None elif data[0] == 'interactions': mode = 'interactions' nint = int(data[1]) elif data[0] == 'rdf_pairs': mode = 'rdf_pairs' nint = int(data[1]) elif data[0] == 'adf_triplets': mode = 'adf_triplets' nint = int(data[1]) elif data[0] == 'sample_error': mode = 'sample_error' elif data[0] == 'match': if len(data) < 2: raise RuntimeError('match entry requires at least one keyword.') for i in range(1,len(data)): infp['match'].append(data[i]) mode = None elif data[0] == 'rdf_match': rdf_match = True if data[1] in ('true', 'True', 'T', 'TRUE') else False infp['rdf_match'] = rdf_match if rdf_match and len(data) > 2: weight = float(data[2]) infp['weights']['rdf'] = weight mode = None elif data[0] == 'adf_match': adf_match = True if data[1] in ('true', 'True', 'T', 'TRUE') else False infp['adf_match'] = adf_match if adf_match and len(data) > 2: weight = float(data[2]) infp['weights']['adf'] = weight mode = None elif data[0] == 'vol_match': vol_match = True if data[1] in ('true', 'True', 'T', 'TRUE') else False infp['vol_match'] = vol_match if vol_match and len(data) > 2: weight = 
float(data[2]) infp['weights']['vol'] = weight mode = None elif data[0] == 'lat_match': lat_match = True if data[1] in ('true', 'True', 'T', 'TRUE') else False infp['lat_match'] = lat_match if lat_match and len(data) > 2: weight = float(data[2]) infp['weights']['lat'] = weight mode = None elif data[0] == 'de_num_individuals': nind = int(data[1]) infp['de_num_individuals'] = nind mode = None elif data[0] == 'de_crossover_rate': cr = float(data[1]) infp['de_crossover_rate'] = cr mode = None elif data[0] == 'de_fraction': frac = float(data[1]) infp['de_fraction'] = frac mode = None elif data[0] == 'de_temperature': temp = float(data[1]) infp['de_temperature'] = temp mode = None elif data[0] == 'cs_num_individuals': nind = int(data[1]) infp['cs_num_individuals'] = nind mode = None elif data[0] == 'cs_fraction': frac = float(data[1]) infp['cs_fraction'] = frac mode = None elif data[0] == 'tpe_gamma': infp['tpe_gamma'] = float(data[1]) mode = None elif '_nsmpl_prior' in data[0]: infp['tpe_nsmpl_prior'] = int(data[1]) mode = None elif '_ntrial' in data[0]: infp['tpe_ntrial'] = int(data[1]) mode = None elif data[0] == 'update_vrange': infp['update_vrange'] = int(data[1]) mode = None else: if mode == 'interactions' and len(data) in (2,3): infp['interactions'].append(tuple(data)) elif mode == 'rdf_pairs' and len(data) == 2: infp['rdf_pairs'].append(tuple(data)) elif mode == 'adf_triplets' and len(data) == 3: infp['adf_triplets'].append(tuple(data)) else: mode = None pass return infp def write_info(infp,args): """ Write out information on input parameters for fp. 
""" print(' Inputs') print(' ----------') print(' num of processes (given by --nproc option) ',int(args['--nproc'])) try: if len(infp['param_files']) == 0: print(' potential {0:s}'.format(infp['potential'])) else: print(' param_files ',end='') for fname in infp['param_files']: print(f' {fname}', end='') print('') except: raise # print(' specorder ',infp['specorder']) fmethod = infp['fitting_method'] print(' fitting_method {0:s}'.format(fmethod)) if fmethod in ('de','DE'): print(' num_individuals ',infp['de_num_individuals']) print(' fraction {0:7.4f}'.format(infp['de_fraction'])) print(' temparature {0:7.4f}'.format(infp['de_temperature'])) print(' crossover_rate {0:7.4f}'.format(infp['de_crossover_rate'])) elif fmethod in ('cs','CS'): print(' num_individuals ',infp['cs_num_individuals']) print(' fraction {0:7.4f}'.format(infp['cs_fraction'])) elif fmethod in ('tpe','TPE','wpe','WPE'): pass else: print(' There is no such fitting method...') print(' num_iteration {0:d}'.format(infp['num_iteration'])) print(' missing_value {0:.1f}'.format(infp['missing_value'])) print(' ----------') print() return None def write_vars_fitpot(vs,vrs,fname='in.vars.fitpot',**kwargs): rc2 = kwargs['rc2'] rc3 = kwargs['rc3'] options = kwargs['options'] hardlim = kwargs['hardlim'] nv = len(vs) with open(fname,'w') as f: if 'hard-limit' in options.keys() and options['hard-limit']: f.write('! 
hard-limit: T\n') f.write('!\n') f.write(' {0:5d} {1:7.3f} {2:7.3f}\n'.format(nv,rc2,rc3)) for i in range(len(vs)): f.write(' {0:15.7f} {1:15.7f} {2:15.7f}'.format(vs[i],*vrs[i])) if 'hard-limit' in options.keys() and options['hard-limit']: f.write(' {0:10.4f} {1:10.4f}\n'.format(*hardlim[i])) else: f.write('\n') return None def parse_option(line): if line[0] not in ('#','!'): raise ValueError('Line is not a comment line.') data = line.split() key = None value = None if len(data) < 3: return key,value if 'hard-limit:' in data[1] or 'hard_limit:' in data[1]: key = 'hard-limit' value = False if data[2] not in ('No','no', 'NO', 'False', 'F', 'false'): value = True return key,value def read_vars_fitpot(fname='in.vars.fitpot'): with open(fname,'r') as f: lines = f.readlines() iv = 0 nv = -1 rc2 = 5.0 rc3 = 3.0 vs = [] vrs = [] vrsh = [] options = {} for line in lines: if line[0] in ('!','#'): k,v = parse_option(line) if k is not None: options[k] = v print(' option: ',k,v) continue data = line.split() if len(data) == 0: continue if nv < 0: nv = int(data[0]) rc2 = float(data[1]) rc3 = float(data[2]) continue else: iv += 1 if iv > nv: break if 'hard-limit' in options.keys() and options['hard-limit']: vs.append(float(data[0])) vrs.append([ float(data[1]), float(data[2])]) vrsh.append([float(data[3]), float(data[4])]) print(' iv,vrhmin,vrhmax= {0:3d} {1:11.3e} {2:11.3e}'.format(iv, float(data[3]), float(data[4]))) else: vs.append(float(data[0])) vrs.append([ float(data[1]), float(data[2])]) vrsh.append([-1e+30, 1e+30]) vs = np.array(vs) vrs = np.array(vrs) vrsh = np.array(vrsh) print('') return rc2,rc3,vs,vrs,vrsh,options def read_rdf(fname,specorder,pairs=[]): """ Read reference/FF RDF from data.(ref/pmd).rdf. 
""" with open(fname,'r') as f: lines = f.readlines() #...Make column indices for pairs icols = {} if len(pairs) == 0: raise ValueError("No pair !") for ip,p in enumerate(pairs): inc = 2 for i in range(len(specorder)): si = specorder[i] for j in range(i,len(specorder)): sj = specorder[j] if set(p) == set([si,sj]): icols[p] = inc inc += 1 rs = [] rdfs = {} for p in pairs: rdfs[p] = [] for line in lines: if line[0] in ('!','#'): continue data = line.split() if len(data) == 0: continue rs.append(float(data[0])) for ip,p in enumerate(pairs): icol = icols[p] rdfs[p].append(float(data[icol])) return rs,rdfs def read_adf(fname,specorder,triplets=[]): """ Read reference/FF ADF from data.(ref/pmd).adf.X-Y-Z where X,Y,Z are element name. """ with open(fname,'r') as f: lines = f.readlines() ths = [] adfs = {} for t in triplets: adfs[t] = [] for line in lines: if line[0] in ('!','#'): continue data = line.split() if len(data) == 0: continue ths.append(float(data[0])) for it,t in enumerate(triplets): adfs[t].append(float(data[it+1])) return ths, adfs def read_vol(fname): """ REead reference/FF volume from data.(ref/pmd).vol. """ with open(fname,'r') as f: vol = float(f.readline()) return vol def read_lat(fname): """ Read reference/FF lattice parameters from data.(ref/pmd).lat. """ with open(fname,'r') as f: lines = f.readlines() a,b,c,alp,bet,gmm = [ float(x) for x in lines[0].split() ] return a,b,c,alp,bet,gmm def get_data(basedir,prefix='ref',**kwargs): """ Get rdf, adf, vol from a given basedir. The prefix should be either ref or pmd. 
""" specorder = kwargs['specorder'] rdf_pairs = kwargs['rdf_pairs'] adf_triplets = kwargs['adf_triplets'] rs = [] rdfs = [] ths = [] adfs = [] vol = 0.0 a=b=c=alp=bet=gmm= 0.0 if kwargs['rdf_match']: rs,rdfs = read_rdf(basedir+'/data.{0:s}.rdf'.format(prefix),specorder,rdf_pairs) if kwargs['adf_match']: ths,adfs = read_adf(basedir+'/data.{0:s}.adf'.format(prefix),specorder,adf_triplets) if kwargs['vol_match']: vol = read_vol(basedir+'/data.{0:s}.vol'.format(prefix)) if kwargs['lat_match']: a,b,c,alp,bet,gmm = read_lat(basedir+'/data.{0:s}.lat'.format(prefix)) data = {'rs':rs, 'rdfs':rdfs, 'ths':ths, 'adfs':adfs, 'vol':vol, 'lat':(a,b,c,alp,bet,gmm)} return data def read_data(fname,): """ General routine of reading data. Input file format ----------------- ``` # Comment lines begins with '#' or '!' # 10 1.0 0.1234 0.2345 0.3456 0.4567 0.5678 0.6789 0.7890 0.8901 0.9012 0.0123 ``` - 1st line: num of data (NDAT), weight of the data (WDAT) - 2nd line-: data values (number of data should be equal to NDAT) """ if not os.path.exists(fname): raise RuntimeError('File not exsits: ',fname) with open(fname,'r') as f: lines = f.readlines() ndat = 0 wdat = 0.0 data = None idat = 0 done = False for line in lines: if line[0] in ('#','!'): continue ldat = line.split() if ndat < 1: ndat = int(ldat[0]) wdat = float(ldat[1]) data = np.zeros(ndat) else: if data is None: raise RuntimeError('data is None, which should not happen.') for i,d in enumerate(ldat): data[idat] = float(d) idat += 1 if idat == ndat: done = True break if done: break return {'ndat':ndat, 'wdat':wdat, 'data':data} def get_data2(basedir,prefix='ref',**kwargs): """ New implementation of get_data, which loads data to be used to fit parameters. The prefix should be either ref or pmd. 
""" matches = kwargs['match'] # print('matches=',matches) data = {} for m in matches: fname = basedir+'/data.{0:s}.{1:s}'.format(prefix,m) # print('m,fname=',m,fname) try: data[m] = read_data(fname,) except: data[m] = None pass return data def loss_func2(pmddata,eps=1.0e-8,**kwargs): """ Compute loss function value using general get_data2 func. """ refdata = kwargs['refdata'] losses = {} L = 0.0 misval = kwargs['missing_value'] luplim = kwargs['fval_upper_limit'] for name in refdata.keys(): ref = refdata[name] wgt = ref['wdat'] pmd = pmddata[name] if pmd == None: losses[name] = misval L += losses[name] *wgt continue num = ref['ndat'] refd = ref['data'] pmdd = pmd['data'] z2 = 0.0 sumdiff2 = 0.0 for n in range(num): # print('n=',n) diff = pmdd[n] -refd[n] sumdiff2 += diff*diff z2 += refd[n]*refd[n] losses[name] = min(sumdiff2 /(z2+eps), luplim) L += losses[name] *wgt if kwargs['print_level'] > 0: print(' iid,losses= {0:8d}'.format(kwargs['iid']),end='') for k in losses.keys(): loss = losses[k] print(' {0:10.4f}'.format(loss),end='') print(' {0:11.5f}'.format(L),flush=True) return L def loss_func(pmddata,eps=1.0e-8,**kwargs): """ Compute loss function value from reference and pmd data. 
""" refdata = kwargs['refdata'] wgts = kwargs['weights'] rdf_pairs = kwargs['rdf_pairs'] adf_triplets = kwargs['adf_triplets'] #...RDF Lr = 0.0 if kwargs['rdf_match'] and len(rdf_pairs) != 0: rs = refdata['rs'] rdfs_ref = refdata['rdfs'] rdfs_pmd = pmddata['rdfs'] for p in rdf_pairs: # print(p) rdf_ref = rdfs_ref[p] rdf_pmd = rdfs_pmd[p] diff2sum = 0.0 z = 0.0 for i,r in enumerate(rs): ref = rdf_ref[i] pmd = rdf_pmd[i] diff = pmd -ref diff2sum += diff*diff z += ref*ref # print('i,r,ref,pmd,z,diff2sum=',i,r,ref,pmd,z,diff2sum) Lr += diff2sum/(z+eps) #/len(rs) Lr /= len(rdf_pairs) #...ADF Lth = 0.0 if kwargs['adf_match'] and len(adf_triplets) != 0: ths = refdata['ths'] adfs_ref = refdata['adfs'] adfs_pmd = pmddata['adfs'] for t in adf_triplets: adf_ref = adfs_ref[t] adf_pmd = adfs_pmd[t] diff2sum = 0.0 z = 0.0 for i,th in enumerate(ths): ref = adf_ref[i] pmd = adf_pmd[i] diff = pmd -ref diff2sum += diff*diff z += ref*ref Lth += diff2sum /(z+eps) #/len(ths) Lth /= len(adf_triplets) #...volume Lvol = 0.0 if kwargs['vol_match']: vol_ref = refdata['vol'] vol_pmd = pmddata['vol'] diff = vol_pmd -vol_ref Lvol = diff*diff /(vol_ref*vol_ref+eps) #...lattice parameters Llat = 0.0 if kwargs['lat_match']: a0,b0,c0,alp0,bet0,gmm0 = refdata['lat'] # print('refdata=',refdata['lat']) a,b,c,alp,bet,gmm = pmddata['lat'] # print('pmddata=',pmddata['lat']) #...Need to take into account the definition difference bet/ dump and vasp # a0l,b0l,c0l,alp0l,bet0l,gmm0l = lat_vasp2dump(a0,b0,c0,alp0,bet0,gmm0) diff = ((a0-a)/a0)**2 \ +((b0-b)/b0)**2 \ +((c0-c)/c0)**2 \ +((alp0-alp)/alp0)**2 \ +((bet0-bet)/bet0)**2 \ +((gmm0-gmm)/gmm0)**2 # diff /= 6 Llat = diff lrw = Lr*wgts['rdf'] lthw = Lth*wgts['adf'] lvolw = Lvol*wgts['vol'] llatw = Llat*wgts['lat'] L = lrw +lthw +lvolw +llatw if kwargs['print_level'] > 0: print(' iid,Lr,Lth,Lvol,Llat,L= {0:8d}'.format(kwargs['iid']) +'{0:10.4f} {1:10.4f} {2:10.4f} {3:10.4f} {4:10.4f}'.format(lrw,lthw,lvolw,llatw,L), flush=True) return L def 
func_wrapper(variables, **kwargs): """ Wrapper function for the above loss_func(). This converts variables to be optimized to parameters for pmd, perform pmd, then get rdf, adf, and vol from the pmd result. Then give them to the above loss_func(). """ # infp = kwargs['infp'] # pairs = kwargs['pairs'] # triplets = kwargs['triplets'] refdata = kwargs['refdata'] wgts = kwargs['weights'] # specorder = kwargs['specorder'] refdata = kwargs['refdata'] subjobscript = kwargs['subjob-script'] subdir = kwargs['subdir-prefix'] +'{0:03d}'.format(kwargs['index']) print_level = kwargs['print_level'] # print('refdata=',refdata) # print('pairs=',pairs) # print('triplets=',triplets) #...Create in.params.XXX files in each subdir varsfp = {} varsfp['rc2'] = kwargs['rc2'] varsfp['rc3'] = kwargs['rc3'] varsfp['variables'] = variables cwd = os.getcwd() if not os.path.exists(subdir): os.mkdir(subdir) shutil.copy(subjobscript,subdir+'/') os.chdir(subdir) if len(kwargs['param_files']) != 0: fp2params(varsfp['variables'], **kwargs) else: if 'vids' not in kwargs.keys(): print(kwargs.keys()) if kwargs['potential'] == 'BVSx': fp2BVSx(varsfp, **kwargs) elif kwargs['potential'] == 'BVS': fp2BVS(varsfp, **kwargs) elif kwargs['Morse'] == 'Morse': fp2Morse(varsfp, **kwargs) #...Compute pmd in the subdir_### L_up_lim = kwargs['fval_upper_limit'] if print_level > 1: print('Running pmd and post-processing at '+subdir, flush=True) try: cmd = "./{0:s} > log.iid_{1:d}".format(subjobscript,kwargs['iid']) # subprocess.run(cmd.split(),check=True) # print('subdir,cmd=',subdir,cmd) subprocess.run(cmd,shell=True,check=True) os.chdir(cwd) # print('Going to get_data from ',subdir) if len(kwargs['match']) != 0: pmddata = get_data2(subdir,prefix='pmd',**kwargs) L = loss_func2(pmddata,**kwargs) else: pmddata = get_data(subdir,prefix='pmd',**kwargs) L = min( loss_func(pmddata,**kwargs), L_up_lim ) except Exception as e: if print_level > 1: print(' Since pmd or post-process failed at {0:s}, '.format(subdir) +'the 
upper limit value is applied to its loss function.', flush=True) os.chdir(cwd) L = L_up_lim return L def get_triplets(interact): triplets = [] for it in interact: if len(it) == 3: triplets.append(it) return triplets def get_pairs(interact): pairs = [] for it in interact: if len(it) == 2: pairs.append(it) return pairs def latprms2hmat(a,b,c,alp,bet,gmm): """ Convert lattice parameters to hmat. See https://arxiv.org/pdf/1506.01455.pdf """ # print(a,b,c,alp,bet,gmm) alpr = np.radians(alp) betr = np.radians(bet) gmmr = np.radians(gmm) # val = (cos(alpr) * cos(betr) - cos(gmmr))\ # / (sin(alpr) * sin(betr)) # val = max(abs(val),1.0) # gmmstar = np.arccos(val) a1 = np.zeros(3) a2 = np.zeros(3) a3 = np.zeros(3) a1[:] = [a, 0.0, 0.0] a2[:] = [b*cos(gmmr), b*sin(gmmr), 0.0] # a3[:] = [c*np.cos(betr), # -c*np.sin(betr)*np.cos(gmmstar), # c*np.sin(betr)*np.sin(gmmstar)] # print('alpr,cos(alpr)=',alpr,cos(alpr)) # print('betr,cos(betr)=',betr,cos(betr)) a3[:] = [c*cos(betr), c*(cos(alpr) -cos(betr)*cos(gmmr))/sin(gmmr), c*sqrt(sin(gmmr)**2 -cos(alpr)**2 -cos(betr)**2 +2.0*cos(alpr)*cos(betr)*cos(gmmr))/sin(gmmr)] hmat = np.zeros((3,3)) hmat[:,0] = a1[:] hmat[:,1] = a2[:] hmat[:,2] = a3[:] return hmat def lat_vasp2dump(a,b,c,alpha,beta,gamma): from nappy.napsys import to_lammps try: hmat = latprms2hmat(a,b,c,alpha,beta,gamma) # print('hmat=',hmat) xlo,xhi,ylo,yhi,zlo,zhi,xy,xz,yz,_ = to_lammps(hmat,[]) # print('after to_lammps=',xlo,xhi,ylo,yhi,zlo,zhi,xy,xz,yz) except Exception as e: raise a1 = np.array([xhi-xlo, 0.0, 0.0]) b1 = np.array([ xy, yhi-ylo, 0.0]) c1 = np.array([ xz, yz, zhi-zlo]) al = np.linalg.norm(a1) bl = np.linalg.norm(b1) cl = np.linalg.norm(c1) alpl = np.arccos(np.dot(b1,c1)/bl/cl) /np.pi *180.0 betl = np.arccos(np.dot(a1,c1)/al/cl) /np.pi *180.0 gmml = np.arccos(np.dot(a1,b1)/al/bl) /np.pi *180.0 return al,bl,cl,alpl,betl,gmml def main(): args = docopt(__doc__.format(os.path.basename(sys.argv[0]))) headline() start = time.time() nproc = int(args['--nproc']) 
infp = read_in_fitpot('in.fitpot') write_info(infp,args) rc2,rc3,vs,vrs,vrsh,options = read_vars_fitpot(infp['param_file']) kwargs = infp kwargs['options'] = options kwargs['hardlim'] = vrsh # kwargs['infp'] = infp kwargs['rc2'] = rc2 kwargs['rc3'] = rc3 kwargs['subdir-prefix'] = args['--subdir-prefix'] kwargs['subjob-script'] = args['--subjob-script'] kwargs['start'] = start smpldir = infp['sample_directory'] if len(infp['match']) != 0: refdata = get_data2(smpldir,prefix='ref',**kwargs) else: pairs = get_pairs(infp['interactions']) rdf_pairs = infp['rdf_pairs'] if len(rdf_pairs) == 0: # if no rdf_pairs are specied, all the pairs are selected specorder = infp['specorder'] rdf_pairs = [] for i,si in enumerate(specorder): for j in range(i,len(specorder)): sj = specorder[j] rdf_pairs.append((si,sj)) # print('pairs =',pairs) # print('rdf_pairs=',rdf_pairs) adf_triplets = infp['adf_triplets'] triplets = get_triplets(infp['interactions']) kwargs['pairs'] = pairs kwargs['rdf_pairs'] = rdf_pairs kwargs['triplets'] = triplets kwargs['adf_triplets'] = adf_triplets refdata = get_data(smpldir,prefix='ref',**kwargs) if kwargs['lat_match']: a0,b0,c0,alp0,bet0,gmm0 = refdata['lat'] #...Need to take into account the definition difference bet/ dump and vasp a,b,c,alp,bet,gmm = lat_vasp2dump(a0,b0,c0,alp0,bet0,gmm0) # print('Reference lattice parameters:',a,b,c,alp,bet,gmm) refdata['lat'] = (a,b,c,alp,bet,gmm) kwargs['refdata'] = refdata if len(kwargs['param_files']) != 0: # New version of treating in.params.XXX files for fname in kwargs['param_files']: with open(fname,'r') as f: kwargs[fname] = f.read() else: fbvs, rads, vids, npqs, charges = read_params_Coulomb('in.params.Coulomb') kwargs['fbvs'] = fbvs kwargs['rads'] = rads kwargs['vids'] = vids kwargs['npqs'] = npqs kwargs['charges'] = charges print(' # iid,losses= iid',end='') if len(kwargs['match']) > 0: for m in kwargs['match']: print(' {0:>9s}'.format(m),end='') else: if kwargs['rdf_match']: print(' 
{0:>9s}'.format('rdf'),end='') if kwargs['adf_match']: print(' {0:>9s}'.format('adf'),end='') if kwargs['vol_match']: print(' {0:>9s}'.format('vol'),end='') if kwargs['lat_match']: print(' {0:>9s}'.format('lat'),end='') print(' total') maxiter = kwargs['num_iteration'] if kwargs['fitting_method'] in ('de','DE'): N = infp['de_num_individuals'] F = infp['de_fraction'] T = infp['de_temperature'] CR = infp['de_crossover_rate'] opt = DE(N,F,CR,T, vs,vrs,vrsh, func_wrapper, write_vars_fitpot, nproc=nproc, **kwargs) elif kwargs['fitting_method'] in ('cs','CS','cuckoo','Cuckoo'): N = infp['cs_num_individuals'] F = infp['cs_fraction'] opt = CS(N,F, vs,vrs,vrsh, func_wrapper, write_vars_fitpot, nproc=nproc, **kwargs) elif kwargs['fitting_method'] in ('tpe','TPE','wpe','WPE'): nbatch = nproc opt = TPE(nbatch, vs, vrs, vrsh, func_wrapper, write_vars_fitpot,**kwargs) opt.run(maxiter) print('elapsed time = {0:f} sec.'.format(time.time()-start)) return None def headline(): print('') print(' fp.py --- fit parameters to any target property ---') print('') cmd = ' '.join(s for s in sys.argv) print(' Executed as {0:s}'.format(cmd)) hostname = subprocess.run(['hostname',], stdout=subprocess.PIPE).stdout.decode('utf-8') print(' on {0:s}'.format(hostname.strip())) print(' at {0:s}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) print() print(' Please cite:') print(' 1) R. Kobayashi, J. Open Source Software, 6(57), 2768 (2021)') print(' 2) R. Kobayashi, Y. Miyaji, K. Nakano, M. Nakayama, APL Materials 8, 081111 (2020)') print() return None if __name__ == "__main__": main()
ryokbys/nap
nappy/fitpot/fp.py
Python
mit
29,415
[ "ADF", "VASP" ]
d30587965c5157d70a0d83c7423b5c7e07631d17a22dbddd5a671abc3103e408
import sqlite3 import os import types import hashlib import time import DIRAC from DIRAC import gLogger, S_OK, S_ERROR from DIRAC.FrameworkSystem.private.monitoring.Activity import Activity from DIRAC.Core.Utilities import Time class MonitoringCatalog: def __init__( self, dataPath ): """ Initialize monitoring catalog """ self.dbConn = False self.dataPath = dataPath self.log = gLogger.getSubLogger( "ActivityCatalog" ) self.createSchema() def __connect( self ): """ Connect to database """ if not self.dbConn: dbPath = "%s/monitoring.db" % self.dataPath self.dbConn = sqlite3.connect( dbPath, isolation_level = None ) def __dbExecute( self, query, values = False ): """ Execute a sql statement """ cursor = self.dbConn.cursor() self.log.debug( "Executing %s" % query ) executed = False while not executed: try: if values: cursor.execute( query, values ) else: cursor.execute( query ) executed = True except: time.sleep( 0.01 ) return cursor def __createTables( self ): """ Create tables if not already created """ self.log.info( "Creating tables in db" ) try: filePath = "%s/monitoringSchema.sql" % os.path.dirname( __file__ ) fd = open( filePath ) buff = fd.read() fd.close() except IOError, e: DIRAC.abort( 1, "Can't read monitoring schema", filePath ) while buff.find( ";" ) > -1: limit = buff.find( ";" ) + 1 sqlQuery = buff[ : limit ].replace( "\n", "" ) buff = buff[ limit : ] try: self.__dbExecute( sqlQuery ) except Exception, e: DIRAC.abort( 1, "Can't create tables", str( e ) ) def createSchema( self ): """ Create all the sql schema if it does not exist """ self.__connect() try: sqlQuery = "SELECT name FROM sqlite_master WHERE type='table';" c = self.__dbExecute( sqlQuery ) tablesList = c.fetchall() if len( tablesList ) < 2: self.__createTables() except Exception, e: self.log.fatal( "Failed to startup db engine", str( e ) ) return False return True def __delete( self, table, dataDict ): """ Execute an sql delete """ query = "DELETE FROM %s" % table valuesList = [] keysList = [] 
for key in dataDict: if type( dataDict[ key ] ) == types.ListType: orList = [] for keyValue in dataDict[ key ]: valuesList.append( keyValue ) orList.append( "%s = ?" % key ) keysList.append( "( %s )" % " OR ".join( orList ) ) else: valuesList.append( dataDict[ key ] ) keysList.append( "%s = ?" % key ) if keysList: query += " WHERE %s" % ( " AND ".join( keysList ) ) self.__dbExecute( "%s;" % query, values = valuesList ) def __select( self, fields, table, dataDict, extraCond = "", queryEnd = "" ): """ Execute a sql select """ valuesList = [] keysList = [] for key in dataDict: if type( dataDict[ key ] ) == types.ListType: orList = [] for keyValue in dataDict[ key ]: valuesList.append( keyValue ) orList.append( "%s = ?" % key ) keysList.append( "( %s )" % " OR ".join( orList ) ) else: valuesList.append( dataDict[ key ] ) keysList.append( "%s = ?" % key ) if type( fields ) in ( types.StringType, types.UnicodeType ): fields = [ fields ] if len( keysList ) > 0: whereCond = "WHERE %s" % ( " AND ".join( keysList ) ) else: whereCond = "" if extraCond: if whereCond: whereCond += " AND %s" % extraCond else: whereCond = "WHERE %s" % extraCond query = "SELECT %s FROM %s %s %s;" % ( ",".join( fields ), table, whereCond, queryEnd ) c = self.__dbExecute( query, values = valuesList ) return c.fetchall() def __insert( self, table, specialDict, dataDict ): """ Execute an sql insert """ valuesList = [] valuePoitersList = [] namesList = [] for key in specialDict: namesList.append( key ) valuePoitersList.append( specialDict[ key ] ) for key in dataDict: namesList.append( key ) valuePoitersList.append( "?" 
) valuesList.append( dataDict[ key ] ) query = "INSERT INTO %s (%s) VALUES (%s);" % ( table, ", ".join( namesList ), ",".join( valuePoitersList ) ) c = self.__dbExecute( query, values = valuesList ) return c.rowcount def __update( self, newValues, table, dataDict, extraCond = "" ): """ Execute a sql update """ valuesList = [] keysList = [] updateFields = [] for key in newValues: updateFields.append( "%s = ?" % key ) valuesList.append( newValues[ key ] ) for key in dataDict: if type( dataDict[ key ] ) == types.ListType: orList = [] for keyValue in dataDict[ key ]: valuesList.append( keyValue ) orList.append( "%s = ?" % key ) keysList.append( "( %s )" % " OR ".join( orList ) ) else: valuesList.append( dataDict[ key ] ) keysList.append( "%s = ?" % key ) if len( keysList ) > 0: whereCond = "WHERE %s" % ( " AND ".join( keysList ) ) else: whereCond = "" if extraCond: if whereCond: whereCond += " AND %s" % extraCond else: whereCond = "WHERE %s" % extraCond query = "UPDATE %s SET %s %s;" % ( table, ",".join( updateFields ), whereCond ) c = self.__dbExecute( query, values = valuesList ) return c.rowcount def registerSource( self, sourceDict ): """ Register an activity source """ retList = self.__select( "id", "sources", sourceDict ) if len( retList ) > 0: return retList[0][0] else: self.log.info( "Registering source", str( sourceDict ) ) if self.__insert( "sources", { 'id' : 'NULL' }, sourceDict ) == 0: return - 1 return self.__select( "id", "sources", sourceDict )[0][0] def registerActivity( self, sourceId, acName, acDict ): """ Register an activity """ m = hashlib.md5() acDict[ 'name' ] = acName acDict[ 'sourceId' ] = sourceId m.update( str( acDict ) ) retList = self.__select( "filename", "activities", acDict ) if len( retList ) > 0: return retList[0][0] else: acDict[ 'lastUpdate' ] = int( Time.toEpoch() - 86000 ) filePath = m.hexdigest() filePath = "%s/%s.rrd" % ( filePath[:2], filePath ) self.log.info( "Registering activity", str( acDict ) ) if self.__insert( 
"activities", { 'id' : 'NULL', 'filename' : "'%s'" % filePath, }, acDict ) == 0: return - 1 return self.__select( "filename", "activities", acDict )[0][0] def getFilename( self, sourceId, acName ): """ Get rrd filename for an activity """ queryDict = { 'sourceId' : sourceId, "name" : acName } retList = self.__select( "filename", "activities", queryDict ) if len( retList ) == 0: return "" else: return retList[0][0] def findActivity( self, sourceId, acName ): """ Find activity """ queryDict = { 'sourceId' : sourceId, "name" : acName } retList = self.__select( "id, name, category, unit, type, description, filename, bucketLength, lastUpdate", "activities", queryDict ) if len( retList ) == 0: return False else: return retList[0] def activitiesQuery( self, selDict, sortList, start, limit ): fields = [ 'sources.id', 'sources.site', 'sources.componentType', 'sources.componentLocation', 'sources.componentName', 'activities.id', 'activities.name', 'activities.category', 'activities.unit', 'activities.type', 'activities.description', 'activities.bucketLength', 'activities.filename', 'activities.lastUpdate' ] extraSQL = "" if sortList: for sorting in sortList: if sorting[0] not in fields: return S_ERROR( "Sorting field %s is invalid" % sorting[0] ) extraSQL = "ORDER BY %s" % ",".join( [ "%s %s" % sorting for sorting in sortList ] ) if limit: if start: extraSQL += " LIMIT %s OFFSET %s" % ( limit, start ) else: extraSQL += " LIMIT %s" % limit retList = self.__select( ", ".join( fields ), 'sources, activities', selDict, 'sources.id = activities.sourceId', extraSQL ) return S_OK( ( retList, fields ) ) def setLastUpdate( self, sourceId, acName, lastUpdateTime ): queryDict = { 'sourceId' : sourceId, "name" : acName } return self.__update( { 'lastUpdate' : lastUpdateTime }, "activities", queryDict ) def getLastUpdate( self, sourceId, acName ): queryDict = { 'sourceId' : sourceId, "name" : acName } retList = self.__update( 'lastUpdate', "activities", queryDict ) if len( retList ) == 
0: return False else: return retList[0] def queryField( self, field, definedFields ): """ Query the values of a field given a set of defined ones """ retList = self.__select( field, "sources, activities", definedFields, "sources.id = activities.sourceId" ) return retList def getMatchingActivities( self, condDict ): """ Get all activities matching the defined conditions """ retList = self.queryField( Activity.dbFields, condDict ) acList = [] for acData in retList: acList.append( Activity( acData ) ) return acList def registerView( self, viewName, viewData, varFields ): """ Register a new view """ retList = self.__select( "id", "views", { 'name' : viewName } ) if len( retList ) > 0: return S_ERROR( "Name for view name already exists" ) retList = self.__select( "name", "views", { 'definition' : viewData } ) if len( retList ) > 0: return S_ERROR( "View specification already defined with name '%s'" % retList[0][0] ) self.__insert( "views", { 'id' : 'NULL' }, { 'name' : viewName, 'definition' : viewData, 'variableFields' : ", ".join( varFields ) } ) return S_OK() def getViews( self, onlyStatic ): """ Get views """ queryCond = {} if onlyStatic: queryCond[ 'variableFields' ] = "" return self.__select( "id, name, variableFields", "views", queryCond ) def getViewById( self, viewId ): """ Get a view for a given id """ if type( viewId ) in ( types.StringType, types.UnicodeType ): return self.__select( "definition, variableFields", "views", { "name" : viewId } ) else: return self.__select( "definition, variableFields", "views", { "id" : viewId } ) def deleteView( self, viewId ): """ Delete a view """ self.__delete( "views", { 'id' : viewId } ) def getSources( self, dbCond, fields = [] ): if not fields: fields = "id, site, componentType, componentLocation, componentName" else: fields = ", ".join( fields ) return self.__select( fields, "sources", dbCond ) def getActivities( self, dbCond ): return self.__select( "id, name, category, unit, type, description, bucketLength", 
"activities", dbCond ) def deleteActivity( self, sourceId, activityId ): """ Delete a view """ acCond = { 'sourceId' : sourceId, 'id' : activityId } acList = self.__select( "filename", "activities", acCond ) if len( acList ) == 0: return S_ERROR( "Activity does not exist" ) rrdFile = acList[0][0] self.__delete( "activities", acCond ) acList = self.__select( "id", "activities", { 'sourceId' : sourceId } ) if len( acList ) == 0: self.__delete( "sources", { 'id' : sourceId } ) return S_OK( rrdFile )
coberger/DIRAC
FrameworkSystem/private/monitoring/MonitoringCatalog.py
Python
gpl-3.0
12,544
[ "DIRAC" ]
810276f8f933806ff03edfb28a71901a8e534bab60eacee14b2c65ba2620926a
############################################################################### ## ## Copyright 2011-2012 Tavendo GmbH ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## ############################################################################### import re UA_FIREFOX = re.compile(".*Firefox/(\d*).*") UA_CHROME = re.compile(".*Chrome/(\d*).*") UA_CHROMEFRAME = re.compile(".*chromeframe/(\d*).*") UA_WEBKIT = re.compile(".*AppleWebKit/([0-9+\.]*)\w*.*") UA_WEBOS = re.compile(".*webos/([0-9+\.]*)\w*.*") UA_HPWEBOS = re.compile(".*hpwOS/([0-9+\.]*)\w*.*") # Chrome ============================================================= # Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11 # Chrome Frame ======================================================= # IE6 on Windows with Chrome Frame # Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; chromeframe/11.0.660.0) # Firefox ============================================================ # Windows 7 64 Bit # Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0a2) Gecko/20120227 Firefox/12.0a2 # Android ============================================================ # Firefox Mobile # Mozilla/5.0 (Android; Linux armv7l; rv:10.0.2) Gecko/20120215 Firefox/10.0.2 Fennec/10.0.2 # Chrome for Android (on ICS) # Mozilla/5.0 (Linux; U; Android-4.0.3; en-us; Galaxy Nexus Build/IML74K) AppleWebKit/535.7 (KHTML, like Gecko) CrMo/16.0.912.75 Mobile Safari/535.7 # Android builtin browser # Samsung 
Galaxy Tab 1 # Mozilla/5.0 (Linux; U; Android 2.2; de-de; GT-P1000 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 # Samsung Galaxy S # Mozilla/5.0 (Linux; U; Android 2.3.3; de-de; GT-I9000 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 # Samsung Galaxy Note # Mozilla/5.0 (Linux; U; Android 2.3.6; de-de; GT-N7000 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 # Samsung Galaxy ACE (no Flash since ARM) # Mozilla/5.0 (Linux; U; Android 2.2.1; de-de; GT-S5830 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 # WebOS ============================================================== # HP Touchpad # Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.5; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.83 Safari/534.6 TouchPad/1.0 # => Qt-WebKit, Hixie-76, Flash # Safari ============================================================= # iPod Touch, iOS 4.2.1 # Mozilla/5.0 (iPod; U; CPU iPhone OS 4_2_1 like Mac OS X; de-de) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5 # => Hixie-76 # MacBook Pro, OSX 10.5.8, Safari 5.0.6 # Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/534.50.2 (KHTML, like Gecko) Version/5.0.6 Safari/533.22.3 # => Hixie-76 # RFC6455 # Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534+ (KHTML, like Gecko) Version/5.1.2 Safari/534.52.7 # Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.24+ (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10 # Hixie-76 # Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.53.11 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10 # Hixie-76 # Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/534.50.2 (KHTML, like Gecko) Version/5.0.6 Safari/533.22.3 # Opera ============================================================== # Windows 7 32-Bit # Opera/9.80 (Windows NT 6.1; U; de) Presto/2.10.229 
Version/11.61 # Windows 7 64-Bit # Opera/9.80 (Windows NT 6.1; WOW64; U; de) Presto/2.10.229 Version/11.62 # Samsung Galaxy S # Opera/9.80 (Android 2.3.3; Linux; Opera Mobi/ADR-1202231246; U; de) Presto/2.10.254 Version/12.00 # Samsung Galaxy Tab 1 # Opera/9.80 (Android 2.2; Linux; Opera Tablet/ADR-1203051631; U; de) Presto/2.10.254 Version/12.00 # Samsung Galaxy ACE: # Opera/9.80 (Android 2.2.1; Linux; Opera Mobi/ADR-1203051631; U; de) Presto/2.10.254 Version/12.00 # Nokia N8, Symbian S60 5th Ed., S60 Bell # Opera/9.80 (S60; SymbOS; Opera Mobi/SYB-1111151949; U; de) Presto/2.9.201 Version/11.50 def _lookupWsSupport(ua): """ Lookup if browser supports WebSocket (Hixie76, Hybi10+, RFC6455) natively, and if not, whether the web-socket-js Flash bridge works to polyfill that. Returns a tuple of booleans (ws_supported, needs_flash, detected) ws_supported = WebSocket is supported needs_flash = Flash Bridge is needed for support detected = the code has explicitly mapped the support/nosupport Params: ua = user agent string, i.e. 
flask.request.user_agent.string """ ## Internet Explorer ## ## FIXME: handle Windows Phone ## if ua.find("MSIE") >= 0: # IE10 has native support if ua.find("MSIE 10") >= 0: # native Hybi-10+ return (True, False, True) # first, check for Google Chrome Frame # http://www.chromium.org/developers/how-tos/chrome-frame-getting-started/understanding-chrome-frame-user-agent if ua.find("chromeframe") >= 0: r = UA_CHROMEFRAME.match(ua) try: v = int(r.groups()[0]) if v >= 14: # native Hybi-10+ return (True, False, True) except: # detection problem return (False, False, False) # Flash fallback if ua.find("MSIE 8") >= 0 or ua.find("MSIE 9") >= 0: return (True, True, True) # unsupported return (False, False, True) ## iOS ## if ua.find("iPhone") >= 0 or ua.find("iPad") >= 0 or ua.find("iPod") >= 0: ## native Hixie76 (as of March 2012), no Flash, no alternative browsers return (True, False, True) ## Android ## if ua.find("Android") >= 0: ## Firefox Mobile ## if ua.find("Firefox") >= 0: # Hybi-10+ for FF Mobile 8+ return (True, False, True) ## Opera Mobile ## if ua.find("Opera") >= 0: # Hixie76 for Opera 11+ return (True, False, True) ## Chrome for Android ## if ua.find("CrMo") >= 0: # http://code.google.com/chrome/mobile/docs/faq.html return (True, False, True) ## Android builtin Browser (ooold WebKit) ## if ua.find("AppleWebKit") >= 0: # Though we return WS = True, and Flash = True here, when the device has no actual Flash support, that # will get later detected in JS. This applies to i.e. 
ARMv6 devices like Samsung Galaxy ACE # builtin browser, only works via Flash return (True, True, True) # detection problem return (False, False, False) ## webOS ## if ua.find("hpwOS") >= 0 or ua.find("webos") >= 0: try: if ua.find("hpwOS") >= 0: vv = [int(x) for x in UA_HPWEBOS.match(ua).groups()[0].split('.')] if vv[0] >= 3: return (True, False, True) elif ua.find("webos") >= 0: vv = [int(x) for x in UA_WEBOS.match(ua).groups()[0].split('.')] if vv[0] >= 2: return (True, False, True) except: # detection problem return (False, False, False) else: # unsupported return (False, False, True) ## Opera ## if ua.find("Opera") >= 0: # Opera 11+ has Hixie76 (needs to be manually activated though) return (True, False, True) ## Firefox ## if ua.find("Firefox") >= 0: r = UA_FIREFOX.match(ua) try: v = int(r.groups()[0]) if v >= 7: # native Hybi-10+ return (True, False, True) elif v >= 3: # works with Flash bridge return (True, True, True) else: # unsupported return (False, False, True) except: # detection problem return (False, False, False) ## Safari ## if ua.find("Safari") >= 0 and not ua.find("Chrome") >= 0: # rely on at least Hixie76 return (True, False, True) ## Chrome ## if ua.find("Chrome") >= 0: r = UA_CHROME.match(ua) try: v = int(r.groups()[0]) if v >= 14: # native Hybi-10+ return (True, False, True) elif v >= 4: # works with Flash bridge return (True, True, True) else: # unsupported return (False, False, True) except: # detection problem return (False, False, False) # detection problem return (False, False, False) UA_DETECT_WS_SUPPORT_DB = {} def lookupWsSupport(ua, debug = True): ws = _lookupWsSupport(ua) if debug: if not UA_DETECT_WS_SUPPORT_DB.has_key(ua): UA_DETECT_WS_SUPPORT_DB[ua] = ws if not ws[2]: msg = "UNDETECTED" elif ws[0]: msg = "SUPPORTED" elif not ws[0]: msg = "UNSUPPORTED" else: msg = "ERROR" print "DETECT_WS_SUPPORT", ua, ws[0], ws[1], ws[2], msg return ws
cachedout/AutobahnPython
autobahn/autobahn/useragent.py
Python
apache-2.0
9,585
[ "Galaxy" ]
29705521baddc14dcf0fdc74d1b32c5b66bb129d2537314c459d4cf2b76992e0
import os import copy import math import functools import numpy as np from ddapp import transformUtils from ddapp.asynctaskqueue import AsyncTaskQueue from ddapp import objectmodel as om from ddapp import visualization as vis from ddapp import robotstate from ddapp import segmentation from ddapp import planplayback from ddapp.pointpicker import PointPicker from ddapp import vtkAll as vtk from ddapp.simpletimer import SimpleTimer from ddapp import affordanceupdater from ddapp.debugVis import DebugData from ddapp import affordanceitems from ddapp import ikplanner from ddapp import vtkNumpy from numpy import array from ddapp.uuidutil import newUUID import affordancepanel import ioUtils from ddapp.tasks.taskuserpanel import TaskUserPanel from ddapp.tasks.taskuserpanel import ImageBasedAffordanceFit import ddapp.tasks.robottasks as rt class TableDemo(object): def __init__(self, robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepPlanner, atlasDriver, lhandDriver, rhandDriver, multisenseDriver, view, sensorJointController, planPlaybackFunction, teleopPanel): self.planPlaybackFunction = planPlaybackFunction self.robotStateModel = robotStateModel self.playbackRobotModel = playbackRobotModel self.ikPlanner = ikPlanner self.manipPlanner = manipPlanner self.footstepPlanner = footstepPlanner self.atlasDriver = atlasDriver self.lhandDriver = lhandDriver self.rhandDriver = rhandDriver self.multisenseDriver = multisenseDriver self.sensorJointController = sensorJointController self.view = view self.teleopPanel = teleopPanel # live operation flags: self.useFootstepPlanner = True self.visOnly = False self.planFromCurrentRobotState = True self.useDevelopment = False if (self.useDevelopment): self.visOnly = True self.planFromCurrentRobotState = False extraModels = [self.robotStateModel] self.affordanceUpdater = affordanceupdater.AffordanceGraspUpdater(self.playbackRobotModel, self.ikPlanner, extraModels) else: extraModels = [self.playbackRobotModel] 
self.affordanceUpdater = affordanceupdater.AffordanceGraspUpdater(self.robotStateModel, self.ikPlanner, extraModels) self.optionalUserPromptEnabled = True self.requiredUserPromptEnabled = True self.plans = [] self.frameSyncs = {} self.graspingHand = 'left' # left, right, both self.tableData = None self.binFrame = None # top level switch between BDI or IHMC (locked base) and MIT (moving base and back) self.lockBack = True self.lockBase = True self.constraintSet = [] self.reachDist = 0.07 # Switch indicating whether to use affordances as a collision environment self.useCollisionEnvironment = True # Switch between simulation/visualisation and real robot operation def setMode(self, mode='visualization'): ''' Switches between visualization and real robot operation. mode='visualization' mode='robot' ''' if (mode == 'visualization'): print "Setting mode to VISUALIZATION" self.useDevelopment = True self.visOnly = True self.planFromCurrentRobotState = False extraModels = [self.robotStateModel] self.affordanceUpdater = affordanceupdater.AffordanceGraspUpdater(self.playbackRobotModel, self.ikPlanner, extraModels) else: print "Setting mode to ROBOT OPERATION" self.useDevelopment = False extraModels = [self.playbackRobotModel] self.affordanceUpdater = affordanceupdater.AffordanceGraspUpdater(self.robotStateModel, self.ikPlanner, extraModels) def addPlan(self, plan): self.plans.append(plan) ### Table and Bin Focused Functions def userFitTable(self): self.tableData = None self.picker = PointPicker(self.view, numberOfPoints=2, drawLines=True, callback=self.onSegmentTable) self.picker.start() def userFitBin(self): self.binFrame = None self.picker = PointPicker(self.view, numberOfPoints=2, drawLines=True, callback=self.onSegmentBin) self.picker.start() def waitForTableFit(self): while not self.tableData: yield def waitForBinFit(self): while not self.binFrame: yield def getInputPointCloud(self): polyData = segmentation.getCurrentRevolutionData() if polyData is None: obj = 
om.findObjectByName('scene') if obj: polyData = obj.polyData else: # fall back to map in case we used mapping rather than loading of a scene obj = om.findObjectByName('map') if obj: polyData = obj.polyData return polyData def onSegmentTable(self, p1, p2): print p1 print p2 self.picker.stop() om.removeFromObjectModel(self.picker.annotationObj) self.picker = None om.removeFromObjectModel(om.findObjectByName('table demo')) self.tableData = segmentation.segmentTableEdge(self.getInputPointCloud(), p1, p2) self.tableObj = vis.showPolyData(self.tableData.mesh, 'table', parent='table demo', color=[0,1,0]) self.tableFrame = vis.showFrame(self.tableData.frame, 'table frame', parent=self.tableObj, scale=0.2) self.tableBox = vis.showPolyData(self.tableData.box, 'table box', parent=self.tableObj, color=[0,1,0], visible=False) self.tableObj.actor.SetUserTransform(self.tableFrame.transform) self.tableBox.actor.SetUserTransform(self.tableFrame.transform) if self.useCollisionEnvironment: self.addCollisionObject(self.tableObj) def onSegmentBin(self, p1, p2): print p1 print p2 self.picker.stop() om.removeFromObjectModel(self.picker.annotationObj) self.picker = None om.removeFromObjectModel(om.findObjectByName('bin frame')) binEdge = p2 - p1 zaxis = [0.0, 0.0, 1.0] xaxis = np.cross(binEdge, zaxis) xaxis /= np.linalg.norm(xaxis) yaxis = np.cross(zaxis, xaxis) t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis) t.PostMultiply() t.Translate(p1) self.binFrame = vis.showFrame(t, 'bin frame', parent=None, scale=0.2) def sortClustersOnTable(self, clusters): ''' returns list copy of clusters, sorted left to right using the table coordinate system. 
(Table y axis points right to left) ''' tableYAxis = self.tableData.axes[1] tableOrigin = np.array(self.tableData.frame.GetPosition()) origins = [np.array(c.frame.GetPosition()) for c in clusters] dists = [np.dot(origin-tableOrigin, -tableYAxis) for origin in origins] return [clusters[i] for i in np.argsort(dists)] def cleanupSegmentedObjects(self): om.removeFromObjectModel(om.findObjectByName('segmentation')) self.clusterObjects = None self.segmentationData = None def segmentTableObjects(self): tableCentroid = segmentation.computeCentroid(self.tableData.box) self.tableData.frame.TransformPoint(tableCentroid, tableCentroid) data = segmentation.segmentTableScene(self.getInputPointCloud(), tableCentroid) data.clusters = self.sortClustersOnTable(data.clusters) self.clusterObjects = vis.showClusterObjects(data.clusters, parent='segmentation') self.segmentationData = data def graspTableObject(self, side): #linkName = self.ikPlanner.getHandLink(side) #t = self.ikPlanner.getLinkFrameAtPose(linkName, self.getPlanningStartPose()) #linkFrame = vis.updateFrame(t, '%s frame' % linkName, scale=0.2, visible=False, parent='planning') obj, objFrame = self.getNextTableObject(side) #frameSync = vis.FrameSync() #frameSync.addFrame(linkFrame) #frameSync.addFrame(objFrame) #self.frameSyncs[linkName] = frameSync #self.playbackRobotModel.connectModelChanged(self.onRobotModelChanged) self.affordanceUpdater.graspAffordance( obj.getProperty('Name') , side) def dropTableObject(self, side='left'): obj, _ = self.getNextTableObject(side) obj.setProperty('Visible', False) for child in obj.children(): child.setProperty('Visible', False) self.clusterObjects.remove(obj) # remove from clusterObjects om.removeFromObjectModel(obj) # remove from objectModel if self.useCollisionEnvironment: objAffordance = om.findObjectByName(obj.getProperty('Name') + ' affordance') objAffordance.setProperty('Collision Enabled', False) objAffordance.setProperty('Visible', False) 
self.affordanceUpdater.ungraspAffordance(obj.getProperty('Name')) def getNextTableObject(self, side='left'): assert len(self.clusterObjects) obj = self.clusterObjects[0] if side == 'left' else self.clusterObjects[-1] frameObj = obj.findChild(obj.getProperty('Name') + ' frame') if self.useCollisionEnvironment: self.prepCollisionEnvironment() collisionObj = om.findObjectByName(obj.getProperty('Name') + ' affordance') collisionObj.setProperty('Collision Enabled', False) return obj, frameObj def computeTableStanceFrame(self): assert self.tableData zGround = 0.0 tableHeight = self.tableData.frame.GetPosition()[2] - zGround t = transformUtils.copyFrame(self.tableData.frame) t.PreMultiply() t1 = transformUtils.frameFromPositionAndRPY([-x/2 for x in self.tableData.dims],[0,0,0]) t.Concatenate(t1) t2 = transformUtils.frameFromPositionAndRPY([-0.35, self.tableData.dims[1]*0.5, -tableHeight],[0,0,0]) t.Concatenate(t2) self.tableStanceFrame = vis.showFrame(t, 'table stance frame', parent=self.tableObj, scale=0.2) def computeBinStanceFrame(self): assert self.binFrame zGround = 0.0 binHeight = self.binFrame.transform.GetPosition()[2] - zGround t = vtk.vtkTransform() t.PostMultiply() t.Translate(-0.45, 0.1, -binHeight) t.Concatenate(self.binFrame.transform) self.binStanceFrame = vis.showFrame(t, 'bin stance frame', parent=None, scale=0.2) t = vtk.vtkTransform() t.PostMultiply() t.RotateZ(30) t.Translate(-0.8, 0.4, -binHeight) t.Concatenate(self.binFrame.transform) self.startStanceFrame = vis.showFrame(t, 'start stance frame', parent=None, scale=0.2) # TODO: deprecate this function: (to end of section): def moveRobotToTableStanceFrame(self): self.teleportRobotToStanceFrame(self.tableStanceFrame.transform) def moveRobotToBinStanceFrame(self): self.teleportRobotToStanceFrame(self.binStanceFrame.transform) def moveRobotToStartStanceFrame(self): self.teleportRobotToStanceFrame(self.startStanceFrame.transform) def planFootstepsToTable(self): 
self.planFootsteps(self.tableStanceFrame.transform) def planFootstepsToBin(self): self.planFootsteps(self.binStanceFrame.transform) def planFootstepsToStart(self): self.planFootsteps(self.startStanceFrame.transform) ### End Object Focused Functions ############################################################### ### Planning Functions ######################################################################## def planFootsteps(self, goalFrame): startPose = self.getPlanningStartPose() request = self.footstepPlanner.constructFootstepPlanRequest(startPose, goalFrame) self.footstepPlan = self.footstepPlanner.sendFootstepPlanRequest(request, waitForResponse=True) def planWalking(self): startPose = self.getPlanningStartPose() plan = self.footstepPlanner.sendWalkingPlanRequest(self.footstepPlan, startPose, waitForResponse=True) self.addPlan(plan) def planWalkToStance(self, stanceTransform): if self.useFootstepPlanner: self.planFootsteps(stanceTransform) self.planWalking() else: self.teleportRobotToStanceFrame(stanceTransform) def planPostureFromDatabase(self, groupName, postureName, side='left'): startPose = self.getPlanningStartPose() endPose = self.ikPlanner.getMergedPostureFromDatabase(startPose, groupName, postureName, side=side) newPlan = self.ikPlanner.computePostureGoal(startPose, endPose) self.addPlan(newPlan) # TODO: integrate this function with the ones below def getRaisedArmPose(self, startPose, side): return self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'arm up pregrasp', side) def getPreDropHighPose(self, startPose, side): return self.ikPlanner.getMergedPostureFromDatabase(startPose, 'table clearing', 'pre drop 1', side) def getPreDropLowPose(self, startPose, side): return self.ikPlanner.getMergedPostureFromDatabase(startPose, 'table clearing', 'pre drop 2', side) def getLoweredArmPose(self, startPose, side): return self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'handdown', side) def planPreGrasp(self, side='left'): 
startPose = self.getPlanningStartPose() endPose = self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'arm up pregrasp', side=side) newPlan = self.ikPlanner.computePostureGoal(startPose, endPose) self.addPlan(newPlan) def planLowerArm(self, side): startPose = self.getPlanningStartPose() endPose = self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'handdown', side=side) newPlan = self.ikPlanner.computePostureGoal(startPose, endPose) self.addPlan(newPlan) def planDropPostureRaise(self, side): startPose = self.getPlanningStartPose() poseA = self.getRaisedArmPose(startPose, side) poseB = self.getPreDropHighPose(startPose, side) poseC = self.getPreDropLowPose(startPose, side) plan = self.ikPlanner.computeMultiPostureGoal([startPose, poseA, poseB, poseC]) self.addPlan(plan) def planDropPostureLower(self, side): startPose = self.getPlanningStartPose() poseA = self.getPreDropHighPose(startPose, side) poseB = self.getRaisedArmPose(startPose, side) poseC = self.getLoweredArmPose(startPose, side) plan = self.ikPlanner.computeMultiPostureGoal([startPose, poseA, poseB, poseC]) self.addPlan(plan) def planDropPostureSwap(self, lowerSide, raiseSide): startPose = self.getPlanningStartPose() poseA = self.getRaisedArmPose(startPose, raiseSide) poseA = self.getPreDropHighPose(poseA, lowerSide) poseB = self.getPreDropHighPose(poseA, raiseSide) poseB = self.getRaisedArmPose(poseB, lowerSide) poseC = self.getPreDropLowPose(poseB, raiseSide) poseC = self.getLoweredArmPose(poseC, lowerSide) plan = self.ikPlanner.computeMultiPostureGoal([startPose, poseA, poseB, poseC]) self.addPlan(plan) def planLowerArmAndStand(self, side): startPose = self.getPlanningStartPose() endPose = self.getLoweredArmPose(startPose, side) endPose, info = self.ikPlanner.computeStandPose(endPose) plan = self.ikPlanner.computePostureGoal(startPose, endPose) self.addPlan(plan) def planReachToTableObject(self, side='left'): obj, frame = self.getNextTableObject(side) startPose = 
self.getPlanningStartPose() if self.ikPlanner.fixedBaseArm: # includes reachDist hack instead of in ikPlanner (TODO!) f = transformUtils.frameFromPositionAndRPY( np.array(frame.transform.GetPosition())-np.array([self.reachDist,0,0]), [0,0,-90] ) f.PreMultiply() f.RotateY(90) f.Update() self.constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, side, f, lockBase=False, lockBack=True) #newFrame = vis.FrameItem('reach_item', f, self.view) #self.constraintSet = self.ikPlanner.planGraspOrbitReachPlan(startPose, side, newFrame, constraints=None, dist=self.reachDist, lockBase=self.lockBase, lockBack=self.lockBack, lockArm=False) else: self.constraintSet = self.ikPlanner.planGraspOrbitReachPlan(startPose, side, frame, constraints=None, dist=self.reachDist, lockBase=self.lockBase, lockBack=self.lockBack, lockArm=False) loweringSide = 'left' if side == 'right' else 'right' armPose = self.getLoweredArmPose(startPose, loweringSide) armPoseName = 'lowered_arm_pose' self.ikPlanner.ikServer.sendPoseToServer(armPose, armPoseName) loweringSideJoints = [] if (loweringSide == 'left'): loweringSideJoints += self.ikPlanner.leftArmJoints else: loweringSideJoints += self.ikPlanner.rightArmJoints reachingSideJoints = [] if (side == 'left'): reachingSideJoints += self.ikPlanner.leftArmJoints else: reachingSideJoints += self.ikPlanner.rightArmJoints armPostureConstraint = self.ikPlanner.createPostureConstraint(armPoseName, loweringSideJoints) armPostureConstraint.tspan = np.array([1.0, 1.0]) self.constraintSet.constraints.append(armPostureConstraint) self.constraintSet.runIk() #armPose = self.getRaisedArmPose(startPose, side) #armPoseName = 'raised_arm_pose' #self.ikPlanner.ikServer.sendPoseToServer(armPose, armPoseName) #armPostureConstraint = self.ikPlanner.createPostureConstraint(armPoseName, reachingSideJoints) #armPostureConstraint.tspan = np.array([0.5, 0.5]) #self.constraintSet.constraints.append(armPostureConstraint) print 'planning reach to' plan = 
self.constraintSet.runIkTraj() self.addPlan(plan) def planReachToTableObjectCollisionFree(self, side ='left'): # Hard-coded demonstration of collision reaching to object on table # Using RRT Connect goalFrame = transformUtils.frameFromPositionAndRPY([1.05,0.4,1],[0,90,-90]) vis.showFrame(goalFrame,'goal frame') frameObj = om.findObjectByName( 'goal frame') startPose = self.getPlanningStartPose() self.constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, side, frameObj.transform, lockBase=self.lockBase, lockBack=self.lockBack) self.constraintSet.runIk() print 'planning reach to planReachToTableObjectCollisionFree' self.constraintSet.ikParameters.usePointwise = False self.constraintSet.ikParameters.useCollision = True self.teleopPanel.endEffectorTeleop.updateCollisionEnvironment() plan = self.constraintSet.runIkTraj() self.addPlan(plan) def planTouchTableObject(self, side='left'): obj, frame = self.getNextTableObject(side) startPose = self.getPlanningStartPose() if self.ikPlanner.fixedBaseArm: # includes distance hack and currently uses reachDist instead of touchDist (TODO!) 
f = transformUtils.frameFromPositionAndRPY( np.array(frame.transform.GetPosition())-np.array([self.reachDist,0,0]), [0,0,-90] ) f.PreMultiply() f.RotateY(90) f.Update() item = vis.FrameItem('reach_item', f, self.view) self.constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, side, f, lockBase=False, lockBack=True) else: self.constraintSet = self.ikPlanner.planGraspOrbitReachPlan(startPose, side, frame, dist=0.05, lockBase=self.lockBase, lockBack=self.lockBack) self.constraintSet.constraints[-1].tspan = [-np.inf, np.inf] self.constraintSet.constraints[-2].tspan = [-np.inf, np.inf] self.constraintSet.runIk() print 'planning touch' plan = self.constraintSet.runIkTraj() self.addPlan(plan) def planLiftTableObject(self, side): startPose = self.getPlanningStartPose() self.constraintSet = self.ikPlanner.planEndEffectorDelta(startPose, side, [0.0, 0.0, 0.15]) if not self.ikPlanner.fixedBaseArm: self.constraintSet.constraints[-1].tspan[1] = 1.0 endPose, info = self.constraintSet.runIk() if not self.ikPlanner.fixedBaseArm: endPose = self.getRaisedArmPose(endPose, side) reachingSideJoints = [] if (side == 'left'): reachingSideJoints += self.ikPlanner.leftArmJoints else: reachingSideJoints += self.ikPlanner.rightArmJoints endPoseName = 'raised_arm_end_pose' self.ikPlanner.ikServer.sendPoseToServer(endPose, endPoseName) postureConstraint = self.ikPlanner.createPostureConstraint(endPoseName, reachingSideJoints) postureConstraint.tspan = np.array([2.0, 2.0]) self.constraintSet.constraints.append(postureConstraint) #postureConstraint = self.ikPlanner.createPostureConstraint('q_nom', robotstate.matchJoints('.*_leg_kny')) #postureConstraint.tspan = np.array([2.0, 2.0]) #self.constraintSet.constraints.append(postureConstraint) #postureConstraint = self.ikPlanner.createPostureConstraint('q_nom', robotstate.matchJoints('back')) #postureConstraint.tspan = np.array([2.0, 2.0]) #self.constraintSet.constraints.append(postureConstraint) print 'planning lift' plan = 
self.constraintSet.runIkTraj() self.addPlan(plan) ### End Planning Functions #################################################################### ########## Glue Functions ##################################################################### def teleportRobotToStanceFrame(self, frame): self.sensorJointController.setPose('q_nom') stancePosition = frame.GetPosition() stanceOrientation = frame.GetOrientation() q = self.sensorJointController.q.copy() q[:2] = [stancePosition[0], stancePosition[1]] q[5] = math.radians(stanceOrientation[2]) self.sensorJointController.setPose('EST_ROBOT_STATE', q) def getHandDriver(self, side): assert side in ('left', 'right') return self.lhandDriver if side == 'left' else self.rhandDriver def openHand(self, side): #self.getHandDriver(side).sendOpen() self.getHandDriver(side).sendCustom(0.0, 100.0, 100.0, 0) def closeHand(self, side): self.getHandDriver(side).sendCustom(100.0, 100.0, 100.0, 0) def sendNeckPitchLookDown(self): self.multisenseDriver.setNeckPitch(40) def sendNeckPitchLookForward(self): self.multisenseDriver.setNeckPitch(15) def waitForAtlasBehaviorAsync(self, behaviorName): assert behaviorName in self.atlasDriver.getBehaviorMap().values() while self.atlasDriver.getCurrentBehaviorName() != behaviorName: yield def printAsync(self, s): yield print s def optionalUserPrompt(self, message): if not self.optionalUserPromptEnabled: return yield result = raw_input(message) if result != 'y': raise Exception('user abort.') def requiredUserPrompt(self, message): if not self.requiredUserPromptEnabled: return yield result = raw_input(message) if result != 'y': raise Exception('user abort.') def delay(self, delayTimeInSeconds): yield t = SimpleTimer() while t.elapsed() < delayTimeInSeconds: yield def waitForCleanLidarSweepAsync(self): currentRevolution = self.multisenseDriver.displayedRevolution desiredRevolution = currentRevolution + 2 while self.multisenseDriver.displayedRevolution < desiredRevolution: yield def 
getEstimatedRobotStatePose(self): return self.sensorJointController.getPose('EST_ROBOT_STATE') def getPlanningStartPose(self): if self.planFromCurrentRobotState: return self.getEstimatedRobotStatePose() else: if self.plans: return robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1]) else: return self.getEstimatedRobotStatePose() def cleanupFootstepPlans(self): om.removeFromObjectModel(om.findObjectByName('walking goal')) om.removeFromObjectModel(om.findObjectByName('footstep plan')) self.footstepPlan = None def playSequenceNominal(self): assert None not in self.plans self.planPlaybackFunction(self.plans) def commitManipPlan(self): self.manipPlanner.commitManipPlan(self.plans[-1]) def commitFootstepPlan(self): self.footstepPlanner.commitFootstepPlan(self.footstepPlan) def waitForPlanExecution(self, plan): planElapsedTime = planplayback.PlanPlayback.getPlanElapsedTime(plan) return self.delay(planElapsedTime + 1.0) def animateLastPlan(self): plan = self.plans[-1] if not self.visOnly: self.commitManipPlan() return self.waitForPlanExecution(plan) def onRobotModelChanged(self, model): for linkName in self.frameSyncs.keys(): t = self.playbackRobotModel.getLinkFrame(linkName) vis.updateFrame(t, '%s frame' % linkName, scale=0.2, visible=False, parent='planning') def createCollisionPlanningScene(self, scene=1, moveRobot=False, loadPerception=False): if (loadPerception): #filename = os.path.expanduser('~/drc-testing-data/collision_scene/collision_scene.vtp') #polyData = ioUtils.readPolyData( filename ) pd = io.readPolyData('/home/mfallon/Desktop/rrt_scene/all.vtp') vis.showPolyData(pd,'scene') if (scene == 0): pose = (array([ 1.20, 0. 
, 0.8]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-tabletop', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,1,0.06]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([ 1.20, 0.5 , 0.4]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-leg1', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,0.05,0.8]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([ 1.20, -0.5 , 0.4]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-leg2', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,0.05,0.8]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([ 1.05, 0.3 , 0.98]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-object1', uuid=newUUID(), pose=pose, Color=[0.9, 0.9, 0.1], Dimensions=[0.08,0.08,0.24]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([ 1.25, 0.1 , 0.98]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-object2', uuid=newUUID(), pose=pose, Color=[0.0, 0.9, 0.0], Dimensions=[0.07,0.07,0.25]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([ 1.25, -0.1 , 0.95]), array([ 1., 0., 0., 0.])) desc = dict(classname='CylinderAffordanceItem', Name='scene0-object3', uuid=newUUID(), pose=pose, Color=[0.0, 0.9, 0.0], Radius=0.035, Length = 0.22) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([ 1.05, -0.2 , 0.95]), array([ 1., 0., 0., 0.])) desc = dict(classname='CylinderAffordanceItem', Name='scene0-object4', uuid=newUUID(), pose=pose, Color=[0.9, 0.1, 0.1], Radius=0.045, Length = 0.22) obj = affordancepanel.panel.affordanceFromDescription(desc) if (moveRobot): self.sensorJointController.q[0] = 0.67 self.sensorJointController.push() elif (scene == 1): pose = (array([-0.69, -1.50, 0.92]), 
array([-0.707106781, 0. , 0. , 0.707106781 ])) desc = dict(classname='BoxAffordanceItem', Name='scene1-tabletop', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,1,0.06]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([-1.05, -1.10, 0.95]), array([-0.707106781, 0. , 0. , 0.707106781 ])) desc = dict(classname='BoxAffordanceItem', Name='scene1-edge1', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.1,0.3,0.05]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([-0.35, -1.10, 0.95]), array([-0.707106781, 0. , 0. , 0.707106781 ])) desc = dict(classname='BoxAffordanceItem', Name='scene1-edge2', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.1,0.3,0.05]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([-0.6803156 , -1.1826616 , 1.31299839]), array([-0.707106781, 0. , 0. , 0.707106781 ])) desc = dict(classname='BoxAffordanceItem', Name='scene1-edge3', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.14,1.0,0.07]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([ -0.7, -1.5 , 1.03]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene1-object1', uuid=newUUID(), pose=pose, Color=[0.9, 0.9, 0.1], Dimensions=[0.05,0.05,0.14]) obj = affordancepanel.panel.affordanceFromDescription(desc) if (moveRobot): self.sensorJointController.q[5] = -1.571 self.sensorJointController.q[0] = -0.75 self.sensorJointController.q[1] = -0.85 self.sensorJointController.push() elif (scene == 2): pose = (array([-0.98873106, 1.50393395, 0.91420001]), array([ 0.49752312, 0. , 0. , 0.86745072])) desc = dict(classname='BoxAffordanceItem', Name='scene2-tabletop', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,1,0.06]) obj = affordancepanel.panel.affordanceFromDescription(desc) pose = (array([-0.98873106, 1.50393395, 0.57]), array([ 0.49752312, 0. , 0. 
, 0.86745072])) desc = dict(classname='BoxAffordanceItem', Name='scene1-object1', uuid=newUUID(), pose=pose, Color=[0.005, 0.005, 0.3], Dimensions=[0.05,0.05,0.14]) obj = affordancepanel.panel.affordanceFromDescription(desc) if (moveRobot): self.sensorJointController.q[0] = -0.6 self.sensorJointController.q[1] = 1.1 self.sensorJointController.q[5] = 2.1 self.sensorJointController.push() ######### Setup collision environment #################### def prepCollisionEnvironment(self): assert len(self.clusterObjects) for obj in self.clusterObjects: self.addCollisionObject(obj) def addCollisionObject(self, obj): if om.getOrCreateContainer('affordances').findChild(obj.getProperty('Name') + ' affordance'): return # Affordance has been created previously frame = obj.findChild(obj.getProperty('Name') + ' frame') (origin, quat) = transformUtils.poseFromTransform(frame.transform) (xaxis, yaxis, zaxis) = transformUtils.getAxesFromTransform(frame.transform) # TODO: move this into transformUtils as getAxisDimensions or so box = obj.findChild(obj.getProperty('Name') + ' box') box_np = vtkNumpy.getNumpyFromVtk(box.polyData, 'Points') box_min = np.amin(box_np, 0) box_max = np.amax(box_np, 0) xwidth = np.linalg.norm(box_max[0]-box_min[0]) ywidth = np.linalg.norm(box_max[1]-box_min[1]) zwidth = np.linalg.norm(box_max[2]-box_min[2]) name = obj.getProperty('Name') + ' affordance' boxAffordance = segmentation.createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name, parent='affordances') boxAffordance.setSolidColor(obj.getProperty('Color')) boxAffordance.setProperty('Alpha', 0.3) ######### Nominal Plans and Execution ################################################################# def prepKukaTestDemoSequence(self, inputFile='~/drc-testing-data/tabletop/kinect_collision_environment.vtp'): filename = os.path.expanduser(inputFile) scene = ioUtils.readPolyData(filename) vis.showPolyData(scene,"scene") self.prepKukaLabScene() def prepKukaLabScene(self): 
self.userFitTable() self.onSegmentTable( np.array([ 0.91544128, 0.06092263, 0.14906664]), np.array([ 0.73494804, -0.21896157, 0.13435645]) ) self.userFitBin() # TODO: actually fit bin, put bin in picture. self.onSegmentBin( np.array([-0.02, 2.43, 0.61 ]), np.array([-0.40, 2.79, 0.61964661]) ) # TODO: fix bin location self.segmentTableObjects() # Plan sequence self.plans = [] def prepTestDemoSequence(self): ''' Running this function should launch a full planning sequence to pick to objects, walk and drop. Requires footstep footstepPlanner ''' filename = os.path.expanduser('~/drc-testing-data/tabletop/table-and-bin-scene.vtp') scene = ioUtils.readPolyData(filename) vis.showPolyData(scene,"scene") #stanceFrame = transformUtils.frameFromPositionAndRPY([0, 0, 0], [0, 0, 123.0]) #self.teleportRobotToStanceFrame(stanceFrame) self.userFitTable() self.onSegmentTable( np.array([-1.72105646, 2.73210716, 0.79449952]), np.array([-1.67336452, 2.63351011, 0.78698605]) ) self.userFitBin() self.onSegmentBin( np.array([-0.02, 2.43, 0.61 ]), np.array([-0.40, 2.79, 0.61964661]) ) self.computeTableStanceFrame() self.computeBinStanceFrame() # Actually plan the sequence: #self.demoSequence() def prepIhmcDemoSequenceFromFile(self): filename = os.path.expanduser('~/drc-testing-data/ihmc_table/ihmc_table.vtp') polyData = ioUtils.readPolyData( filename ) vis.showPolyData( polyData,'scene') self.prepIhmcDemoSequence() def prepIhmcDemoSequence(self): self.userFitBin() self.onSegmentBin( np.array([ 0.62, -1.33, 0.80]), np.array([ 0.89, -0.87, 0.57]) ) self.userFitTable() self.onSegmentTable( np.array([ 1.11, 0.11, 0.85]), np.array([ 0.97, 0.044, 0.84]) ) self.segmentTableObjects() self.computeBinStanceFrame() self.computeTableStanceFrame() def planSequence(self): self.useFootstepPlanner = True self.cleanupFootstepPlans() self.planFromCurrentRobotState = False self.segmentTableObjects() self.plans = [] # Go home self.planWalkToStance(self.startStanceFrame.transform) # Pick Objects from table: 
self.planWalkToStance(self.tableStanceFrame.transform) if (self.graspingHand == 'both'): self.planSequenceTablePick('left') self.planSequenceTablePick('right') else: self.planSequenceTablePick(self.graspingHand) # Go home self.planWalkToStance(self.startStanceFrame.transform) # Go to Bin self.planWalkToStance(self.binStanceFrame.transform) # Drop into the Bin: if (self.graspingHand == 'both'): self.planDropPostureRaise('left') self.dropTableObject('left') self.planDropPostureLower('left') self.planDropPostureRaise('right') self.dropTableObject('right') self.planDropPostureLower('right') else: self.planDropPostureRaise(self.graspingHand) self.dropTableObject(self.graspingHand) self.planDropPostureLower(self.graspingHand) # Go home self.planWalkToStance(self.startStanceFrame.transform) def planSequenceTablePick(self, side): self.planPreGrasp(side) if self.ikPlanner.fixedBaseArm: self.planLowerArm(side) self.planReachToTableObject(side) if not self.ikPlanner.fixedBaseArm: self.planTouchTableObject(side) # TODO: distance is handled by reach, hence ignore self.graspTableObject(side) self.planLiftTableObject(side) def autonomousExecute(self): ''' Use global variable self.useDevelopment to switch between simulation and real robot execution ''' #self.ikPlanner.ikServer.usePointwise = True #self.ikPlanner.ikServer.maxDegreesPerSecond = 20 taskQueue = AsyncTaskQueue() #self.addTasksToQueueInit(taskQueue) # Go home if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, self.startStanceFrame.transform, 'Walk to Start') for _ in self.clusterObjects: # Pick Objects from table: if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, self.tableStanceFrame.transform, 'Walk to Table') taskQueue.addTask(self.printAsync('Pick with Left Arm')) self.addTasksToQueueTablePick(taskQueue, 'left') #taskQueue.addTask(self.printAsync('Pick with Right Arm')) #self.addTasksToQueueTablePick(taskQueue, 'right') # Go home if not self.ikPlanner.fixedBaseArm: 
self.addTasksToQueueWalking(taskQueue, self.startStanceFrame.transform, 'Walk to Start') # Go to Bin if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, self.binStanceFrame.transform, 'Walk to Bin') # Drop into the Bin: taskQueue.addTask(self.printAsync('Drop from Left Arm')) self.addTasksToQueueDropIntoBin(taskQueue, 'left') #taskQueue.addTask(self.printAsync('Drop from Right Arm')) #self.addTasksToQueueDropIntoBin(taskQueue, 'right') # Go home if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, self.startStanceFrame.transform, 'Walk to Start') taskQueue.addTask(self.printAsync('done!')) return taskQueue def addTasksToQueueInit(self, taskQueue): taskQueue.addTask(self.printAsync('user fit table')) taskQueue.addTask(self.userFitTable) taskQueue.addTask(self.waitForTableFit) taskQueue.addTask(self.printAsync('user fit bin')) taskQueue.addTask(self.userFitBin) taskQueue.addTask(self.waitForBinFit) if not self.ikPlanner.fixedBaseArm: taskQueue.addTask(self.computeTableStanceFrame) taskQueue.addTask(self.computeBinStanceFrame) def addTasksToQueueTablePick(self, taskQueue, side): taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planPreGrasp, side)) taskQueue.addTask(self.animateLastPlan) taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planReachToTableObject, side)) taskQueue.addTask(self.animateLastPlan) if not self.ikPlanner.fixedBaseArm: # TODO: distance is handled by reach, hence ignore taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planTouchTableObject, side)) taskQueue.addTask(self.animateLastPlan) taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.closeHand, side)) taskQueue.addTask(functools.partial(self.graspTableObject, side)) taskQueue.addTask(self.requiredUserPrompt('continue? 
y/n: ')) taskQueue.addTask(functools.partial(self.planLiftTableObject, side)) taskQueue.addTask(self.animateLastPlan) def addTasksToQueueDropIntoBin(self, taskQueue, side): taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planDropPostureRaise, side)) taskQueue.addTask(self.animateLastPlan) taskQueue.addTask(functools.partial(self.openHand, side)) taskQueue.addTask(functools.partial(self.dropTableObject, side)) taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) if not self.ikPlanner.fixedBaseArm: taskQueue.addTask(functools.partial(self.planDropPostureLower, side)) else: taskQueue.addTask(functools.partial(self.planPreGrasp, side)) taskQueue.addTask(self.animateLastPlan) def addTasksToQueueWalking(self, taskQueue, stanceTransform, message): taskQueue.addTask(self.printAsync(message)) taskQueue.addTask( functools.partial(self.planWalkToStance, stanceTransform )) taskQueue.addTask(self.optionalUserPrompt('Send footstep plan. continue? y/n: ')) taskQueue.addTask(self.commitFootstepPlan) #taskQueue.addTask(self.animateLastPlan) # ought to wait until arrival, currently doesnt wait the right amount of time taskQueue.addTask(self.requiredUserPrompt('Have you arrived? 
y/n: ')) class TableTaskPanel(TaskUserPanel): def __init__(self, tableDemo): TaskUserPanel.__init__(self, windowTitle='Table Task') self.tableDemo = tableDemo self.addDefaultProperties() self.addButtons() self.addTasks() def addButtons(self): self.addManualSpacer() self.addManualButton('Lower arm', functools.partial(self.tableDemo.planLowerArm, 'left')) self.addManualSpacer() self.addManualButton('Raise arm', self.tableDemo.planPreGrasp) self.addManualSpacer() self.addManualButton('Commit Manip', self.tableDemo.commitManipPlan) def addDefaultProperties(self): self.params.addProperty('Hand', 0, attributes=om.PropertyAttributes(enumNames=['Left', 'Right'])) self.params.addProperty('Base', 0, attributes=om.PropertyAttributes(enumNames=['Fixed', 'Free'])) self.params.addProperty('Back', 1, attributes=om.PropertyAttributes(enumNames=['Fixed', 'Free'])) self._syncProperties() def onPropertyChanged(self, propertySet, propertyName): self._syncProperties() self.taskTree.removeAllTasks() self.addTasks() def _syncProperties(self): self.tableDemo.planFromCurrentRobotState = True if self.params.getPropertyEnumValue('Hand') == 'Left': self.tableDemo.graspingHand = 'left' else: self.tableDemo.graspingHand = 'right' if self.params.getPropertyEnumValue('Base') == 'Fixed': self.tableDemo.lockBase = True else: self.tableDemo.lockBase = False if self.params.getPropertyEnumValue('Back') == 'Fixed': self.tableDemo.lockBack = True else: self.tableDemo.lockBack = False def addTasks(self): # some helpers def addTask(task, parent=None): self.taskTree.onAddTask(task, copy=False, parent=parent) def addFunc(func, name, parent=None): addTask(rt.CallbackTask(callback=func, name=name), parent=parent) def addManipulation(func, name, parent=None): group = self.taskTree.addGroup(name, parent=parent) addFunc(func, name='plan motion', parent=group) addTask(rt.CheckPlanInfo(name='check manip plan info'), parent=group) addFunc(v.commitManipPlan, name='execute manip plan', parent=group) 
addTask(rt.WaitForManipulationPlanExecution(name='wait for manip execution'), parent=group) addTask(rt.UserPromptTask(name='Confirm execution has finished', message='Continue when plan finishes.'), parent=group) v = self.tableDemo self.taskTree.removeAllTasks() # graspingHand is 'left', side is 'Left' side = self.params.getPropertyEnumValue('Hand') ############### # add the tasks # prep prep = self.taskTree.addGroup('Preparation') addTask(rt.CloseHand(name='close grasp hand', side=side), parent=prep) addTask(rt.CloseHand(name='close left hand', side='Left'), parent=prep) addTask(rt.CloseHand(name='close right hand', side='Right'), parent=prep) addFunc(v.prepIhmcDemoSequenceFromFile, 'prep from file', parent=prep) # walk walk = self.taskTree.addGroup('Approach Table') addTask(rt.RequestFootstepPlan(name='plan walk to table', stanceFrameName='table stance frame'), parent=walk) addTask(rt.UserPromptTask(name='approve footsteps', message='Please approve footstep plan.'), parent=walk) addTask(rt.CommitFootstepPlan(name='walk to table', planName='table grasp stance footstep plan'), parent=walk) addTask(rt.SetNeckPitch(name='set neck position', angle=35), parent=walk) addTask(rt.WaitForWalkExecution(name='wait for walking'), parent=walk) # lift object # Not Collision Free: addManipulation(functools.partial(v.planPreGrasp, v.graspingHand ), name='raise arm') # seems to ignore arm side? 
addManipulation(functools.partial(v.planReachToTableObject, v.graspingHand), name='reach') # Collision Free: #addManipulation(functools.partial(v.planReachToTableObjectCollisionFree, v.graspingHand), name='reach') addFunc(functools.partial(v.graspTableObject, side=v.graspingHand), 'grasp', parent='reach') addManipulation(functools.partial(v.planLiftTableObject, v.graspingHand), name='lift object') # walk to start walkToStart = self.taskTree.addGroup('Walk to Start') addTask(rt.RequestFootstepPlan(name='plan walk to start', stanceFrameName='start stance frame'), parent=walkToStart) addTask(rt.UserPromptTask(name='approve footsteps', message='Please approve footstep plan.'), parent=walkToStart) addTask(rt.CommitFootstepPlan(name='walk to start', planName='start stance footstep plan'), parent=walkToStart) addTask(rt.WaitForWalkExecution(name='wait for walking'), parent=walkToStart) # walk to bin walkToBin = self.taskTree.addGroup('Walk to Bin') addTask(rt.RequestFootstepPlan(name='plan walk to bin', stanceFrameName='bin stance frame'), parent=walkToBin) addTask(rt.UserPromptTask(name='approve footsteps', message='Please approve footstep plan.'), parent=walkToBin) addTask(rt.CommitFootstepPlan(name='walk to start', planName='bin stance footstep plan'), parent=walkToBin) addTask(rt.WaitForWalkExecution(name='wait for walking'), parent=walkToBin) # drop in bin addManipulation(functools.partial(v.planDropPostureRaise, v.graspingHand), name='drop: raise arm') # seems to ignore arm side? addFunc(functools.partial(v.dropTableObject, side=v.graspingHand), 'drop', parent='drop: release') addManipulation(functools.partial(v.planDropPostureLower, v.graspingHand), name='drop: lower arm')
edowson/director
src/python/ddapp/tabledemo.py
Python
bsd-3-clause
48,436
[ "VTK" ]
652c8873d9e58dc5ce92091e982af1dfab8a7f2bf0944c11bc47a12657c0f5e0
from itertools import izip_longest from numpy import array from numpy.testing import assert_array_almost_equal import unittest from periodic.crystal import Crystal class TestNaCl(unittest.TestCase): # Test data is from the Navy's Crystal Lattie Structures website: # http://cst-www.nrl.navy.mil/lattice/struk.xmol/b1.pos # All distances are written in Angstroms. latticevecs = ([0.00000000, 2.81500000, 2.81500000], [2.81500000, 0.00000000, 2.81500000], [2.81500000, 2.81500000, 0.00000000]) basis = (('Na', array([0.00000000, 0.00000000, 0.00000000])), ('Cl', array([2.81500000, 2.81500000, 2.81500000]))) # (n1,n2,n3, [Symbol, position, Symbol, position, etc.]) images = ( (1, 0, 0, ['Na', array([ 0. , 2.815, 2.815]), 'Cl', array([ 2.815, 5.63 , 5.63 ])]), (0, 1, 0, ['Na', array([ 2.815, 0. , 2.815]), 'Cl', array([ 5.63 , 2.815, 5.63 ])]), (0, 0, 1, ['Na', array([ 2.815, 2.815, 0. ]), 'Cl', array([ 5.63 , 5.63 , 2.815])]), (-2, 3, -1, ['Na', array([ 5.63 , -8.445, 2.815]), 'Cl', array([ 8.445, -5.63 , 5.63 ])]) ) def setUp(self): self.crystal = Crystal(self.latticevecs, self.basis) def test_images(self): for (n1, n2, n3, solution) in self.images: computed = self.crystal.cell_n((n1, n2, n3)) print computed[1::2], solution[1::2] self.assertEqual(computed[0::2], solution[0::2]) assert_array_almost_equal(computed[1::2], solution[1::2], decimal=15) # print computed # print basis # for sol, com in izip_longest(solution, computed): # pass # assert_array_equal(basis, computed) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testReplicate'] unittest.main()
danielsjensen1/crystalpy
tests/test_translate.py
Python
bsd-3-clause
1,987
[ "CRYSTAL" ]
3fc938a0ff6f267b394d25713b29d80307b3f92ed4ec281362fc7559edccf8f2
""" A general-purpose Simulation class for discrete events. The Simulation object is the central object of a simulation. It handles the simulation clock time and maintains communication between the components of the simulation. A simulation is structured as a directed graph of event-processing nodes called EventProcessors (EPs). EventProcessors generate data-carrying Events, which are routed through the graph to other EventProcessors via delayed connections. Most objects in the simulation will be a subclass of EventProcessor, customized to provide some specific behavior. There are different subclasses of Event, each doing different types of computation, and each can contain any arbitrary Python data. A simulation begins by giving each EventProcessor an opportunity to send any initial events. It then proceeds by processing and delivering events to EventProcessors in time order. After all events for the current time are processed, the simulation gives each EventProcessor a chance to do any final computation, after which simulation time skips to the time of the earliest event remaining in the queue. PORTS All connections between EPs are tagged with a source port and a destination port. Ports are internal addresses that EPs can use to distinguish between inputs and outputs. A port specifier can be any hashable Python object. If not specified, the input and output ports for a connection default to None. src_ports distinguish different types of output that an EP may produce. When sending output, the EP must call self.send_output() separately for each port. dest_ports distinguish different types of input an EP may receive and process. The EP is free to interpret the input port designation on an incoming event in any way it chooses. An example dest_port might be for an EP that receives 'ordinary' neural activity on the default port, and receives a separate modulatory signal that influences learning. 
The originator of the modulatory signal might have a connection to the EP with dest_port = 'Modulation'. Multiple ports can be grouped by a dest EP by assuming a convention that the keys are tuples, e.g. ('JointNormalize','Group1'), ('JointNormalize','Group2'). An example src_port use might be an EP that sends different events to itself than it sends out to other EPs. In this case the self connections might have src_port = 'Recurrent', and probably also a special dest_port. $Id$ """ __version__='$Revision$' import param from copy import copy, deepcopy import time import bisect # CEBALERT: Is it dangerous to have 'Forever' implemented # like this? To start with, min(Forever,1) gives # FixedPoint('-1.00,2') i.e. Forever. # Forever = FixedPoint(-1) Forever = -1 # (Python 2.6 includes support for float('inf') on all platforms?) #: Default path to the current simulation, from main #: Only to be used by script_repr(), to allow it to generate #: a runnable script _simulation_path="topo.sim" class EventProcessor(param.Parameterized): """ Base class for EventProcessors, i.e. objects that can accept and handle events. This base class handles the basic mechanics of connections and sending events, and stores both incoming and outgoing connections. The dest_ports attribute specifies which dest_ports are supported by this class; subclasses can augment or change this list if they wish. The special value dest_ports=None means to accept connections to any dest_port, while dest_ports=[None,'Trigger'] means that only connections to port None or port 'Trigger' are accepted. Similarly, the src_ports attribute specifies which src_ports will be given output by this class. """ __abstract = True dest_ports=[None] src_ports=[None] def __init__(self,**params): """ Create an EventProcessor. Note that just creating an EventProcessor does not mean it is part of the simulation (i.e. it is not in the simulation's list of EventProcessors, and it will not have its start() method called). 
        To add an EventProcessor e to a simulation s, simply do
        s['name_of_e']=e.  At this point, e's 'name' attribute will be
        set to 'name_of_e'.
        """
        super(EventProcessor,self).__init__(**params)

        # A subclass could use another data structure to optimize operations
        # specific to itself, if it also overrides _dest_connect().
        #
        # in_connections/out_connections hold EPConnection instances.
        # simulation is a backlink filled in by Simulation.__setitem__
        # when this EP is added to a simulation; it is None until then.
        self.in_connections = []
        self.out_connections = []
        self.simulation = None


    def _port_match(self,key,portlist):
        """
        Return True if the given key matches any port on the given list.

        In the default implementation, a port is considered a match if
        the port is == to the key, but subclasses of EventProcessor can
        override this to allow weaker forms of matching.
        """
        return key in portlist


    # if extra parameters are required for an EP subclass, a
    # dictionary could be added to Simulation.connect() to hold
    # them, and passed on here
    def _src_connect(self,conn):
        """
        Add the specified connection to the list of outgoing connections.

        Raises ValueError if conn.src_port is not one of the declared
        src_ports (an empty/None src_ports list accepts any port).
        Should only be called from Simulation.connect().
        """
        if self.src_ports and not self._port_match(conn.src_port,self.src_ports):
            raise ValueError("%s is not on the list of ports provided for outgoing connections for %s: %s." %
                             (str(conn.src_port), self.__class__, str(self.src_ports)))

        # CB: outgoing connection must be uniquely named among others
        # going to the same destination.
for existing_connection in self.out_connections: if existing_connection.name==conn.name and existing_connection.dest==conn.dest: raise ValueError('A connection out of an EventProcessor must have a unique name among connections to a particular destination; "%s" out of %s into %s already exists'%(conn.name,conn.dest,self.name)) # CB: alternative: outgoing connection must have a unique name ## for existing_connection in self.out_connections: ## if existing_connection.name==conn.name: ## raise ValueError('A connection out of an EventProcessor must have a unique name; "%s" out of %s already exists'%(conn.name,self.name)) self.out_connections.append(conn) def _dest_connect(self,conn): """ Add the specified connection to the list of incoming connections. Should only be called from Simulation.connect(). """ if self.dest_ports and not self._port_match(conn.dest_port,self.dest_ports): raise ValueError("%s is not on the list of ports allowed for incoming connections for %s: %s." % (str(conn.dest_port), self.__class__, str(self.dest_ports))) for existing_connection in self.in_connections: if existing_connection.name == conn.name: raise ValueError('A connection into an EventProcessor must have a unique name; "%s" into %s already exists'%(conn.name,self.name)) self.in_connections.append(conn) def start(self): """ Called by the simulation when the EventProcessor is added to the simulation. If an EventProcessor needs to have any code run when it is added to the simulation, the code can be put into this method in the subclass. """ pass ### JABALERT: Should change send_output to accept a list of src_ports, not a single src_port. def send_output(self,src_port=None,data=None): """ Send some data out to all connections on the given src_port. The data is deepcopied before it is sent out, to ensure that future changes to the data are not reflected in events from the past. 
""" out_conns_on_src_port = [conn for conn in self.out_connections if self._port_match(conn.src_port,[src_port])] data=deepcopy(data) for conn in out_conns_on_src_port: #self.verbose("Sending output on src_port %s via connection %s to %s" % (str(src_port), conn.name, conn.dest.name)) e=EPConnectionEvent(self.simulation.convert_to_time_type(conn.delay)+self.simulation.time(),conn,data,deep_copy=False) self.simulation.enqueue_event(e) def input_event(self,conn,data): """ Called by the simulation when an EPConnectionEvent is delivered; the EventProcessor should process the data somehow. """ raise NotImplementedError def process_current_time(self): """ Called by the simulation before advancing the simulation time. Allows the event processor to do any computation that requires that all events for this time have been delivered. Computations performed in this method should not generate any events with a zero time delay, or else causality could be violated. (By default, does nothing.) """ pass def script_repr(self,imports=[],prefix=" "): """Generate a runnable command for creating this EventProcessor.""" return _simulation_path+"['"+self.name+"']="+\ super(EventProcessor,self).script_repr(imports=imports,prefix=prefix) class EventProcessorParameter(param.Parameter): """Parameter whose value can be any EventProcessor instance.""" def __init__(self,default=EventProcessor(),**params): super(EventProcessorParameter,self).__init__(default=default,**params) def __set__(self,obj,val): if not isinstance(val,EventProcessor): raise ValueError("Parameter must be an EventProcessor.") else: super(EventProcessorParameter,self).__set__(obj,val) from param import parameterized class EPConnection(param.Parameterized): """ EPConnection stores basic information for a connection between two EventProcessors. """ ## JPALERT: This type-checking is redundant, since ## Simulation.connect() only allows the user to create connections ## between existing simulation objects, which must be EPs. 
Type ## checking here means that it is impossible to ever instantiate ## an EPConnection in any situation (including debugging) w/o ## making src and dest be EPs. However, there is nothing I can ## find that requires that the src or dest be EPs. While some ## *subclasses* of EPConnection (such as Projection) do require ## that their src and dest support the interfaces of some ## *subclasses* of EventProcessor (e.g. Sheet.activity), there is ## no reason that those objects have to be EPs, per se. IMO, ## excessive type checking removes much of the power of using a ## dynamic language like Python. ## src = EventProcessorParameter(default=None,constant=True,precedence=0.10,doc= ## """The EventProcessor from which messages originate.""") ## dest = EventProcessorParameter(default=None,constant=True,precedence=0.11,doc= ## """The EventProcessor to which messages are delivered.""") src = param.Parameter(default=None,constant=True,precedence=0.10,doc= """The EventProcessor from which messages originate.""") dest = param.Parameter(default=None,constant=True,precedence=0.11,doc= """The EventProcessor to which messages are delivered.""") src_port = param.Parameter(default=None,precedence=0.20,doc= """ Identifier that can be used to distinguish different types of outgoing connections. EventProcessors that generate only a single type of outgoing event will typically use a src_port of None. However, if multiple types of communication are meaningful, the EventProcessor can accept other values for src_port. It is up to the src EventProcessor to deliver appropriate data to each port, and to declare what will be sent over that port. """) dest_port = param.Parameter(default=None,precedence=0.21,doc= """ Identifier that can be used to distinguish different types of incoming connections. EventProcessors that accept only a single type of incoming event will typically use a src_port of None. 
However, if multiple types of communication are meaningful, the EventProcessor can accept other values for dest_port. It is up to the dest EventProcessor to process the data appropriately for each port, and to define what is expected to be sent to that port. """) # Should the lower bound be exclusive? delay = param.Number(default=0.05,bounds=(0,None),doc=""" Simulation time between generation of an Event by the src and delivery to the dest. Should normally be nonzero, to represent a causal with a well-defined ordering of events.""") private = param.Boolean(default=False,doc= """Set to true if this connection is for internal use only, not to be manipulated by a user.""") # CEBALERT: should be reimplemented. It's difficult to understand, # and contains the same code twice. But it does work. def remove(self): """ Remove this connection from its src's list of out_connections and its dest's list of in_connections. """ # remove from EPs that have this as in_connection i = 0 to_del = [] for in_conn in self.dest.in_connections: if in_conn is self: to_del.append(i) i+=1 for i in to_del: del self.dest.in_connections[i] # remove from EPs that have this as out_connection i = 0 to_del = [] for out_conn in self.src.out_connections: if out_conn is self: to_del.append(i) i+=1 for i in to_del: del self.src.out_connections[i] def script_repr(self,imports=[],prefix=" "): """Generate a runnable command for creating this connection.""" if self.private: return "" settings=[] for name,val in self.get_param_values(): try: # There may be a better way to figure out which parameters specify classes if issubclass(val,object): rep=val.__name__ # Generate import statement cls = val.__name__ mod = val.__module__ imports.append("from %s import %s" % (mod,cls)) except TypeError: if name=="src" or name=="dest": rep=None else: rep = parameterized.script_repr(val,imports,prefix,settings) if rep is not None: settings.append('%s=%s' % (name,rep)) # add import statement cls = self.__class__.__name__ mod 
= self.__module__
        imports.append("from %s import %s" % (mod,cls))

        return _simulation_path+".connect('"+self.src.name+"','"+self.dest.name+ \
               "',connection_type="+self.__class__.__name__+ \
               ",\n"+prefix+(",\n"+prefix).join(settings) + ")"


# CB: event is not a Parameterized because of a (small) performance hit.
class Event(object):
    """Hierarchy of classes for storing simulation events of various types."""

    def __init__(self,time):
        # time: the simulation time at which this event is to be delivered
        self.time = time

    def __call__(self,sim):
        """
        Cause some computation to be performed, deliver a message, etc.,
        as appropriate for each subtype of Event.  Should be passed the
        simulation object, to allow access to .time() etc.
        """
        raise NotImplementedError

    def __cmp__(self,ev):
        """
        Implements event comparison by time, allowing sorting,
        and queue maintenance using bisect module or minheap
        implementations, if needed.

        NOTE: identity comparisons should always be done using the
        'is' operator, not '=='.
        """
        # Ordering considers only .time: two distinct events with equal
        # times compare equal, hence the NOTE above about using 'is'.
        if self.time > ev.time:
            return 1
        elif self.time < ev.time:
            return -1
        else:
            return 0


class EPConnectionEvent(Event):
    """
    An Event for delivery to an EPConnection.

    Provides access to a data field, which can be used for anything
    the src wants to provide, and a link to the connection over which
    it has arrived, so that the dest can determine what to do with
    the data.

    By default, the data is deepcopied before being added to this
    instance for safety (e.g. so that future changes to data
    structures do not affect messages arriving from the past).
    However, if you can ensure that the copying is not necessary
    (e.g. if you deepcopy before sending a set of identical messages),
    then you can pass deep_copy=False to avoid the copy.
""" def __init__(self,time,conn,data=None,deep_copy=True): super(EPConnectionEvent,self).__init__(time) assert isinstance(conn,EPConnection) self.data = deepcopy(data) if deep_copy else data self.conn = conn def __call__(self,sim): self.conn.dest.input_event(self.conn,self.data) def __repr__(self): return "EPConnectionEvent(time="+`self.time`+",conn="+`self.conn`+")" class CommandEvent(Event): """An Event consisting of a command string to execute.""" def __init__(self,time,command_string): """ Add the event to the simulation. Raises an exception if the command_string contains a syntax error. """ self.command_string = command_string self.__test() super(CommandEvent,self).__init__(time) def __repr__(self): return "CommandEvent(time="+`self.time`+", command_string='"+self.command_string+"')" def script_repr(self,imports=[],prefix=" "): """Generate a runnable command for creating this CommandEvent.""" return _simulation_path+'.schedule_command('\ +`self.time`+',"'+self.command_string+'")' def __call__(self,sim): """ exec's the command_string in __main__.__dict__. Be sure that any required items will be present in __main__.__dict__; in particular, consider what will be present after the network is saved and restored. For instance, results of scripts you have run, or imports they make---all currently available in __main__.__dict__---will not be saved with the network. """ # Presumably here to avoid importing __main__ into the rest of the file import __main__ param.Parameterized(name='CommandEvent').message("Running command %s" \ % (self.command_string)) try: exec self.command_string in __main__.__dict__ except: print "Error in scheduled command:" raise def __test(self): """ Check for SyntaxErrors in the command. """ try: compile(self.command_string,"CommandString","single") except SyntaxError: print "Error in scheduled command:" raise class FunctionEvent(Event): """ Event that executes a given function function(*args,**kw). 
""" def __init__(self,time,fn,*args,**kw): super(FunctionEvent,self).__init__(time) self.fn = fn self.args = args self.kw = kw def __call__(self,sim): self.fn(*self.args,**self.kw) def __repr__(self): return 'FunctionEvent(%s,%s,*%s,**%s)' % (`self.time`,`self.fn`,`self.args`,`self.kw`) class EventSequence(Event): """ Event that contains a sequence of other events to be scheduled and executed. The .time attributes of the events in the sequence are interpreted as offsets relative to the start time of the sequence itself. """ def __init__(self,time,sequence): super(EventSequence,self).__init__(time) self.sequence = sequence def __call__(self,sim): # Enqueue all the events in the sequence, offsetting their # times from the current time sched_time = sim.time() for ev in self.sequence: new_ev = copy(ev) sched_time += ev.time new_ev.time = sched_time sim.enqueue_event(new_ev) def __repr__(self): return 'EventSequence(%s,%s)' % (`self.time`,`self.sequence`) class PeriodicEventSequence(EventSequence): """ An EventSequence that reschedules itself periodically Takes a period argument that determines how often the sequence will be scheduled. If the length of the sequence is longer than the period, then the length of the sequence will be used as the period. """ ## JPHACKALERT: This should really be refactored into a ## PeriodicEvent class that periodically executes a single event, ## then the user can construct a periodic sequence using a ## combination of PeriodicEvent and EventSequence. This would ## change the behavior if the sequence length is longer than the ## period, but I'm not sure how important that is, and it might ## actually be useful the other way. 
    def __init__(self,time,period,sequence):
        super(PeriodicEventSequence,self).__init__(time,sequence)
        # Simulation-time interval at which the whole sequence repeats.
        self.period = period

    def __call__(self,sim):
        # Schedule this round of the sequence's events...
        super(PeriodicEventSequence,self).__call__(sim)

        # ...then reschedule this event itself.
        # Find the timed length of the sequence
        # (sum of the per-event offsets; see EventSequence.__call__).
        seq_length = sum(e.time for e in self.sequence)

        if seq_length < self.period:
            # If the sequence is shorter than the period, then reschedule
            # the sequence to occur again after the period
            self.time += self.period
        else:
            # If the sequence is longer than the period, then
            # reschedule to start after the sequence ends.
            self.time += seq_length
        sim.enqueue_event(self)

    def __repr__(self):
        return 'PeriodicEventSequence(%s,%s,%s)' % (`self.time`,`self.period`,`self.sequence`)


# CB: code that previously existed in various places now collected
# together.  The original timing code was not properly tested, and the
# current code has not been tested either: needs writing cleanly and
# testing.  This whole class is pretty difficult to follow.
#
### JP: Is it possible that some or all of this can be more cleanly
### implemented using PeriodicEvents?
from math import floor

class SomeTimer(param.Parameterized):
    """
    Provides a countdown timer for functions that run repeatedly.

    There are two distinct ways to use the timer.

    The first, via call_and_time(), is for calling some function every
    specified number of steps for a specified duration.  Currently
    call_and_time() is used for timing calls to simulation.run() every
    1.0 steps for 100 iterations.  See the Simulation class for an
    example of using the timer in this way.

    The second, via call_fixed_num_times(), is for calling some
    function repeatedly a specified number of times.  A case to use
    call_fixed_num_times() would be timing pattern presentations,
    where the number of times the pattern_presenter will be called is
    known in advance.  Additionally, this method allows a list of
    arguments to be passed to the function (in this case, the
    permutation for each call).
    """
    # * parameters to control formatting?
# * the parameter types for some of the following could be more specific step = param.Parameter(default=2,doc= """Only relevant with call_and_time(), not call_fixed_num_times(). Each iteration, func is called as func(step). For example, step=1 with func set to topo.sim.time would cause the simulation time to advance once per iteration. The default value (None) gives 50 iterations for any value of simulation_duration passed to call_and_time(simulation_duration).""") estimate_interval = param.Number(default=50,doc= """Interval in simulation time between estimates.""") func = param.Parameter(default=None,instantiate=True,doc= """Function to be timed.""") ## func_args = Parameter(default=None,instantiate=True,doc= ## """Arguments passed to func at time of calling.""") simulation_time_fn = param.Parameter(default=None,instantiate=True,doc= """Function that returns the simulation time.""") real_time_fn = param.Parameter(default=time.time,instantiate=True,doc= """Function that returns the wallclock time.""") receive_info = param.Parameter(default=[],instantiate=True,doc= """List of objects that will receive timing information. 
Each must have a timing_info() method.""") stop = param.Boolean(default=False,doc= """If set to True, execution of func (and timing) will cease at the end of the current iteration.""") def __pass_out_info(self,time,percent,name,duration,remaining): [thing(time,percent,name,duration,remaining) for thing in self.receive_info] # CB: this used to say how long the operation took (in wallclock time) def __measure(self,fduration,step,arg_list=None): if not arg_list: # no list of arguments means not being called set number of times fixed_num_calls = False else: fixed_num_calls = True iters = int(floor(fduration/step)) recenttimes=[] if not fixed_num_calls: arg_list=[step]*iters simulation_starttime = self.simulation_time_fn() self.stop = False for i in xrange(iters): recenttimes.append(self.real_time_fn()) length = len(recenttimes) if (length>self.estimate_interval): recenttimes.pop(0) length-=1 self.func(arg_list[i]) percent = 100.0*i/iters estimate = (iters-i)*(recenttimes[-1]-recenttimes[0])/length self.__pass_out_info(time=self.simulation_time_fn(), percent=percent, name=self.func.__name__, duration=fduration, remaining=estimate) ## HACK refresh windows for camera in simulation time import topo if hasattr(topo, 'guimain'): topo.guimain.refresh_activity_windows() ## if self.stop: break if not self.stop: if not fixed_num_calls: # ensure specified duration has been respected, since code above might not # complete specified duration (integer number of iterations) leftover = fduration+simulation_starttime-self.simulation_time_fn() if leftover>0: self.func(leftover) percent = 100 self.__pass_out_info(time=self.simulation_time_fn(), percent=percent, name=self.func.__name__, duration=fduration, remaining=0) def call_fixed_num_times(self,args_for_iterations): """ Call self.func(args_for_iterations[i]) for all i in args_for_iterations. 
""" self.__measure(len(args_for_iterations),1.0,arg_list=args_for_iterations) def call_and_time(self,fduration): """ Call self.func(self.step or fduration/50.0) for fduration. """ # default to 50 steps unless someone set otherwise step = self.step or fduration/50.0 self.__measure(fduration,step) # CEBALERT: This singleton-producing mechanism is pretty complicated, # and it would be great if someone could simplify it. Getting all of # the behavior we want for e.g. Simulation is tricky, but there are # tests for it. Note that: # (1) There should only ever be one single Simulation instance for # which register is True. Creating, copying, and unpickling # need to take this into account. # (2) A Simulation instance for which register is False should # behave the same as any normal Python object. # # For how to use, see topo.base.simulation.Simulation or # topo.misc.commandline.GlobalParams. class OptionalSingleton(object): _inst = None def __new__(cls,singleton): """ Return the single instance stored in _inst if singleton is True; otherwise, return a new instance. """ if singleton: if cls is not type(cls._inst): cls._inst = object.__new__(cls) cls._inst._singleton = True return cls._inst else: new_inst = object.__new__(cls) new_inst._singleton = False return new_inst def __getnewargs__(self): return (self._singleton,) def __copy__(self): # An OptionalSingleton(singleton=False) instance is copied, while the # OptionalSingleton(singleton=True) instance is not copied. if self._singleton: return self else: # Ideally we'd just call "object.__copy__", but apparently # there's no such method. # CB: I *think* this is how to do a copy. Any better # ideas? Python's copy.copy() function calls an object's # __reduce__ method and then reconstructs the object from # that using copy._reconstruct(). 
new_obj = self.__class__(self._singleton) new_obj.__dict__ = copy(self.__dict__) return new_obj def __deepcopy__(self,m): if self._singleton: return self else: new_obj = self.__class__(self._singleton) new_obj.__dict__ = deepcopy(self.__dict__,m) return new_obj # CB: I might have bound __copy__ (& __deepcopy__) just to the # Simulation(singleton=True) instance to avoid the Simulation # class having a __copy__ method at all, but copy() only checks # the *class* for the existence of __copy__. # Simulation stores its events in a linear-time priority queue (i.e., a # sorted list.) For efficiency, e.g. for spiking neuron simulations, # we'll probably need to replace the linear priority queue with a more # efficient one. Jeff has an O(log N) minheap implementation, but # there are likely to be many others to select from. # class Simulation(param.Parameterized,OptionalSingleton): """ A simulation class that uses a simple sorted event list (instead of e.g. a sched.scheduler object) to manage events and dispatching. Simulation is a singleton: there is only one instance of Simulation, no matter how many times it is instantiated. """ # CEBALERT: Must use convert_to_time_type() everywhere a time is # specified to ensure all times are the appropriate type. Relying # on everyone to convert times to the time type seems dangerous! time_type = param.Parameter(default=float,constant=True,doc=""" Callable for converting user-specified times into the numeric type to be used for Simulation's time values. Simulation's time can be set to any numeric type that supports the usual Python numeric operations, including at least multiplication, addition, and subtraction. Most of the code assumes that fractional values are supported, as they are for floats, but it may be possible to use an integer type instead if various default fractional values are overridden. For instance, one might wish to use arbitrary precision floating-point time to avoid accumulating rounding errors. 
If stepping through a simulation every 0.05 time units, after 20 such steps using floats the simulation will not reach 1.0 exactly, but will instead be slightly higher or lower. With an arbitrary precision float type such a series of steps can be guaranteed to reach 1.0 exactly. Alternatively, one might wish to use a rational type so that events can be guaranteed to happen a certain number of times in a given interval, even if the ratio cannot be expressed as an even decimal or binary fraction. time_type determines how the user can specify times. For instance, if the user specifies a time as 0.05 (a float), this will be represented in floating point as something like 0.050000000000000003. gmpy.mpq will correctly convert that to mpq(1,20). Other types might require times to be specified as strings (e.g. decimal.Decimal('0.05') in Python 2.6); in this case the user would need to use a string whenever specifying a time (e.g. a connection delay or a call to run() ). Some other types will only produce the expected result if they are first configured appropriately (e.g. decimal.Decimal(0.05) in Python 2.7 will give something like Decimal('0.05000000000000000277555756156289135105907917022705078125') by default, but this can be controlled by using an appropriate decimal.Context). Extra arguments can be passed to time_type when it is being used to create the Simulation's time: see time_type_args. Some potentially useful number classes:: - gmpy.mpq: gmpy provides Python with access to the fast GNU Multi-Precision library (which requires GMP to be built); gmpy.mpq is gmpy's rational type. - fixedpoint.FixedPoint: pure Python fixed-point number, but quite slow. - Python's decimal.Decimal and fractions.Fraction classes. """) # CEBALERT: probably should remove this and allow people to deal # with functions requiring arguments # (e.g. fixedpoint.FixedPoint(time,precision=4)) by currying or # creating their own wrapper function. 
We wouldn't then need # convert_to_time_type(), it would instead just be a call to # time_type() (which should also be renamed). time_type_args = param.Parameter(default=(),constant=True,doc=""" Tuple of arguments passed to time_type after the first argument (which is the time).""") register = param.Boolean(default=True,constant=True,doc=""" Whether or not to register this Simulation. If True, this Simulation (when created explicitly or when unpickled) will replace any existing registered Simulation (if one exists). Thus only one Simulation with register=True can exist at any one time, which makes it simpler to handle defining and reloading a series of simulations without polluting the memory space with unused simulations.""") startup_commands = param.Parameter(instantiate=True,default=[],doc=""" List of string commands that will be exec'd in __main__.__dict__ (i.e. as if they were entered at the command prompt) every time this simulation is unpickled (and will be executed before the simulation is itself unpickled). For example, allows items to be imported before scheduled_commands are run. """) time_printing_format = param.String("%(_time)09.2f",doc=""" Format string to be used when the simulation time must be formatted as a string, e.g. for display or for basename(). When the string is evaluated, the time will be available as the attribute '_time'. """) basename_format = param.String("%(name)s_%(timestr)s",doc=""" Format string to be used by the basename() function. When the string is evaluated, the formatted time from time_printing_format will be available as the attribute 'timestr'. """) eps_to_start = [] name = param.Parameter(constant=False) ### Simulation(register=True) is a singleton # # # There is only ever one instance of Simulation(register=True). # This instance is stored in the '_inst' attribute; when __new__ # is called and register is True, this instance is created if it # doesn't already exist, and returned otherwise. 
copying or # deepcopying this instance returns the instance. # # For a Simulation with register False, calling __new__ results in # a new object as usual for Python objects. copying and # deepcopying returns a new Simulation with a copy or deepcopy # (respectively) of the original Simulation's __dict__. # # See OptionalSingleton for more information. def __new__(cls,*args,**kw): # simulate behavior of a parameter for register if 'register' in kw: register = kw['register'] # this elif shouldn't be required, but is needed for # unpickling: OptionalSingleton's __getnewargs__ is called # with register as the first argument (I don't know how else # to set register on unpickling). elif len(args)==1: register = args[0] else: register = cls.register n = OptionalSingleton.__new__(cls,register) # CEBALERT: without removing references to the Sheets from # from instances of Slice, those instances of Slice and the # Sheets they refer to are never garbage collected. # # This temporary implementation of cleanup code explicitly # removes references to Sheets from Slice instances for # typical simulations. # # 1: Better than having cleanup code here would be to make a # change to Slice, so that a Slice either doesn't have a # reference to a Sheet, or doesn't hold onto that reference. # # 2: Some cleanup code will always be required here in # Simulation.__new__: As well as removing references to Sheets # from Slices, it is also necessary to remove references to # sheets from Simulation's lists of EPs - otherwise the sheets # are not garbage collected and memory usage will go up every # time a new Simulation is created. This cleanup must be in # Simulation.__new__ so that it runs whenever a simulation is # created or unpickled (it can't be done e.g. in # load_snapshot). # # 3: this particular implementation assumes the only instances # of Slice are in ConnectionFields, which is true for our # simulations. (This won't matter when the slice cleanup # becomes unnecessary.) 
if hasattr(n,'_cleanup'): n._cleanup() # if we don't collect() here (exactly here - not in _cleanup, # and not later), gc seems to lose track of some objects and # there is still a (smaller) memory increase with every call # to load_snapshot() import gc gc.collect() return n # CEBALERT: see gc alert in __new__() def _cleanup(self): # will always be required: in case eps haven't been started # so are still in the list if hasattr(self,'eps_to_start'): self.eps_to_start[:]=[] if hasattr(self,'_event_processors'): for name,EP in self._event_processors.items(): for c in EP.in_connections: if hasattr(c,'_cleanup'): c._cleanup() # will always be required self._event_processors[name]=None # (check when cleaning up existing mechanism for # adding sheets e.g. sim['x']=sheet could first set # sim['x'] to None if there is already a sheet with # name x...) # CEBALERT: if we're keeping this, should have a better name def convert_to_time_type(self,time): """ Convert the supplied time to the Simulation's time_type. """ # pylint: disable-msg=W0201 if self.time_type_args: newtime = self.time_type(time,*self.time_type_args) else: newtime = self.time_type(time) return newtime # Note that __init__ can still be called after the # Simulation(register=True) instance has been created. E.g. with # Simulation.register is True, # Simulation(name='A'); Simulation(name='B') # # would leave the single Simulation(register=True) instance with # name=='B'. This is because, as is usual in Python, __new__ # creates an instance of a class, while __init__ is subsequently # given that instance (to initialize). def __init__(self,*args,**params): """ Initialize a Simulation instance. 
""" self._time = self.convert_to_time_type(0) param.Parameterized.__init__(self,**params) self._event_processors = {} if self.register: # Indicate that no specific name has been set self.name=params.get('name') # Set up debugging messages to include the simulator time param.parameterized.dbprint_prefix= \ (lambda: "Time: "+self.timestr()+" ") param.Dynamic.time_fn = self.time self.events = [] # CB: consider collections.deque? (PEP 290) self._events_stack = [] self.eps_to_start = [] self.item_scale=1.0 # this variable determines the size of each item in a diagram # CB (this comment applies to SomeTimer!): make this a # parameter for documentation? Otherwise nobody will know # about being able to adjust step. # # we set step to 2 so that by default timing doesn't slow simulation too much. but # e.g. leaving it as None would result in info at 2% increments of requested run duration, # no matter what duration (0.005 or 5, etc). self.timer = SomeTimer(func=self.run, simulation_time_fn=self.time) def __getitem__(self,item_name): """ Return item_name if it exists as an EventProcessor in the Simulation. See objects(). """ if not isinstance(item_name,str): raise TypeError("Expected string (objects in the Simulation are indexed by name); %s is a %s"%(item_name,type(item_name))) try: return self.objects()[item_name] except KeyError: raise AttributeError("Simulation doesn't contain '"+item_name+"'.") # CEBALERT: should this at least give a warning when an existing # EP is replaced? def __setitem__(self,ep_name,ep): """ Add ep to the simulation, setting its name to ep_name. ep must be an EventProcessor. If ep_name already exists in the simulation, ep overwrites the original object (as for a dictionary). Note: EventProcessors do not necessarily have to be added to the simulation to be used, but otherwise they will not receive the start() message. 
Adding a node to the simulation also sets the backlink node.simulation, so that the node can enqueue events and read the simulation time. """ if not isinstance(ep_name,str): raise TypeError("Expected string for item name (EPs in the Simulation are indexed by name).") if not isinstance(ep,EventProcessor): raise TypeError("Expected EventProcessor: objects in the Simulation must be EPs.") if ep in self._event_processors.values(): self.warning("EventProcessor "+str(ep)+" () already exists in the simulation and will not be added.") else: ep.initialized=False ep.name=ep_name ep.initialized=True # deletes and overwrites any existing EP with the same name, # silently, as if a dictionary if ep.name in self._event_processors: del self[ep.name] self._event_processors[ep_name] = ep ep.simulation = self self.eps_to_start.append(ep) def __delitem__(self,ep_name): """ Dictionary-style deletion of EPs from the simulation; see __delete_ep(). Deletes EP from simulation, plus connections that come into it and connections that go out of it. """ if not isinstance(ep_name,str): raise TypeError("Expected string for item name (EPs in the Simulation are indexed by name).") self.__delete_ep(ep_name) def __delete_ep(self,ep_name): """ Remove the specified EventProcessor from the simulation, plus delete connections that come into it and connections that go from it. (Used by 'del sim[ep_name]' (as for a dictionary) to delete an event processor from the simulation.) """ ep = self[ep_name] # remove from simulation list of eps del self._event_processors[ep_name] # remove out_conections that go to this ep for conn in ep.in_connections: conn.remove() # remove in_connections that come from this ep for conn in ep.out_connections: conn.remove() def __iter__(self): for obj in self.objects(): yield obj def time(self): """ Return the current simulation time. If the time returned will be used in the computation of a floating point variable, it should be cast into a floating point number by float(). 
""" return self._time def timestr(self,specified_time=None): """ Returns the specified time (or the current time, if none specified) formatted using time_printing_format, which allows users to control how much precision, etc. is used for time displays. """ # CEBALERT: I doubt this gets all attributes. Does it get # properties (not that there are any right now)? all_vars = dict(self.get_param_values()) all_vars.update(self.__dict__) if specified_time is not None: all_vars['_time']=specified_time timestr = self.time_printing_format % all_vars return timestr @property def timestr_prop(self): """ A property that simply returns self.timestr(); useful for setting the interactive command-line prompt. """ return self.timestr() def basename(self): """ Return a string suitable for labeling an object created by the current simulation at the current time. By default this is simply the name of the simulation + " " + the result from evaluating the time_printing_format parameter. """ all_vars = dict(self.get_param_values()) all_vars.update(self.__dict__) all_vars['timestr']=self.timestr() return self.basename_format % all_vars # Change current run() to _run(), and current run_and_time() to run()? # CEBALERT: need to simplify duration/until code. Hiding 'until' option # until it's fixed (presumably nobody's using it). def run_and_time(self,duration=Forever): # ,until=Forever): if duration==Forever: # CEBALERT: timing code not setup to handle indefinite durations # (e.g. 'Forever') self.run(duration) return else: self.timer.call_and_time(duration) def run(self,duration=Forever,until=Forever): """ Process simulation events for the specified duration or until the specified time. Arguments: duration = length of simulation time to run. Default: run indefinitely while there are still events in the event_queue. until = maximum simulation time to simulate. Default: run indefinitely while there are still events in the event queue. 
If both duration and until are used, the one that is reached first will apply. Note that duration and until should be specified in a format suitable for conversion (coercion?) into the Simulation's _time_type. """ # CEBALERT: need to do something about the global 'Forever'; # what it should be probably varies with _time_type. # CEBALERT: calls to topo.sim.run() within topo should use a # string to specify the time rather than a float (since float # is not compatible with all number types). duration = self.convert_to_time_type(duration) until = self.convert_to_time_type(until) # CEBHACKALERT: If I do topo.sim.run(10), then topo.sim.run(until=3), # topo.sim._time returns to 3 (i.e. the simulation time can jump backwards). # JP: This because of the weird sim._time = stop_time line at the end of this method. # see my HACKALERT below. # Initialize any EPs that haven't been started yet # # Anything that manipulates the event stack in some way # (e.g. calls state_push() *must* ensure that this code has # been executed first (i.e. the code must call topo.sim.run(0) # before doing anything). (Currently applies to # pattern_present(), Test Pattern's Present button, and # save_input_generators, but future code may need such calls # as well.) for e in self.eps_to_start: e.start() self.eps_to_start=[] # Complicated expression for min(time+duration,until) if duration == Forever: stop_time = until elif until == Forever: stop_time = self._time + duration else: stop_time = min(self._time+duration,until) did_event = False while self.events and (stop_time == Forever or self._time <= stop_time): # Loop while there are events and it's not time to stop. if self.events[0].time < self._time: # Warn and then discard events scheduled *before* the current time self.warning('Discarding stale (unprocessed) event',repr(self.events[0])) self.events.pop(0) elif self.events[0].time > self._time: # Before moving on to the next time, do any processing # necessary for the current time. 
This is necessary only # if some event has been delivered at the current time. if did_event: did_event = False #self.debug("Time to sleep; next event time: %s",self.timestr(self.events[0].time)) for ep in self._event_processors.values(): ep.process_current_time() # Set the time to the frontmost event. Bear in mind # that the front event may have been changed by the # .process_current_time() calls. if self.events[0].time > self._time: self.sleep(self.events[0].time-self._time) else: # Pop and call the event at the head of the queue. event = self.events.pop(0) self.debug(lambda:"Delivering %s"%(event)) event(self) did_event=True # The time needs updating if the events have not done it. #if self.events and self.events[0].time >= stop_time: # JPHACKALERT: This is weird. It can cause time to go backwards, # (see CEBHACKALERT above). Also, if the simulation runs out of # events before stop_time is reached, shouldn't that be reflected in # Simulation.time()? if stop_time != Forever : self._time = stop_time def sleep(self,delay): """ Advance the simulator time by the specified amount. By default simply increments the _time value, but subclasses can override this method as they wish, e.g. to wait for an external real time clock to advance first. """ self._time += self.convert_to_time_type(delay) def enqueue_event(self,event): """ Enqueue an Event at an absolute simulation clock time. """ assert isinstance(event,Event) if not self.events or event >= self.events[-1]: # The new event goes at the end of the event queue if there # isn't a queue right now, or if it's later than the last # event's time. self.events.append(event) elif event < self.events[0]: # If it's earlier than the first item it goes at the beginning. self.events.insert(0,event) else: # Otherwise, it's inserted at the appropriate # position somewhere inside the event queue. # New events are enqueued after (right of) existing # events with the same time, i.e. 'simultaneous' events # are executed FIFO. 
bisect.insort_right(self.events,event) def schedule_command(self,time,command_string): """ Add a command to execute in __main__.__dict__ at the specified time. The command should be a string. """ event = CommandEvent(time=self.convert_to_time_type(time),command_string=command_string) self.enqueue_event(event) def state_push(self): """ Save a copy of the current state of the simulation for later restoration. The saved copy includes all the events on the simulator stack (saved using event_push()). Each EventProcessor is also asked to save its own state. This operation is useful for testing something while being able to roll back to the original state. """ self.event_push() for ep in self._event_processors.values(): ep.state_push() param.Parameterized.state_push(self) def state_pop(self): """ Pop the most recently saved state off the stack. See state_push() for more details. """ self.event_pop() for ep in self._event_processors.values(): ep.state_pop() param.Parameterized.state_pop(self) def event_push(self): """ Save a copy of the events queue for later restoration. Same as state_push(), but does not ask EventProcessors to save their state. """ # CBALERT: does it make more sense to put the original events onto the # stack, and replace self.events with the copies? Not sure this makes # any practical difference currently. self._events_stack.append((self._time,[copy(event) for event in self.events])) def event_pop(self): """ Pop the most recently saved events queue off the stack. Same as state_pop(), but does not restore EventProcessors' state. """ self._time, self.events = self._events_stack.pop() def event_clear(self,event_type=EPConnectionEvent): """ Clear out all scheduled events of the specified type. For instance, with event_type=EPConnectionEvent, this function can be used to ensure that no pending EPConnectionEvents will remain on the queue during some analysis or measurement operation. 
One will usually want to do a state_push before using this function, then clear out the events that should be deleted, do the measurement or analysis, and then do state_pop to restore the original state. """ events_temp = [] for e in self.events: if not isinstance(e,event_type): events_temp = events_temp + [e] self.events = events_temp # Could just process src and dest in conn_params. # Also could accept the connection already created, rather than # creating one. def connect(self,src,dest,connection_type=EPConnection,**conn_params): """ Connect the src EventProcessor to the dest EventProcessor. The src and dest should be string names of existing EPs. Returns the connection that was created. If the connection hasn't been given a name, it defaults to 'srcTodest'. """ if 'name' not in conn_params: # Might want to have a way of altering the name if this one's # already in use. At the moment, an error is raised (correctly). conn_params['name'] = src+'To'+dest # Looks up src and dest in our dictionary of objects conn = connection_type(src=self[src],dest=self[dest],**conn_params) self[src]._src_connect(conn) self[dest]._dest_connect(conn) return conn def objects(self,baseclass=EventProcessor): """ Return a dictionary of simulation objects having the specified base class. All simulation objects have a base class of EventProcessor, and so the baseclass must be either EventProcessor or one of its subclasses. If there is a simulator called s, you can type e.g. s.objects().keys() to see a list of the names of all objects. 
""" return dict([(ep_name,ep) for (ep_name,ep) in self._event_processors.items() if isinstance(ep,baseclass)]) def connections(self): """Return a list of all unique connections to or from any object.""" # The return value cannot be a dictionary like objects(), # because connection names are not guaranteed to be unique connlists =[o.in_connections + o.out_connections for o in self.objects().values()] # Flatten one level conns=[] for cl in connlists: for c in cl: conns.append(c) return [c for c in set(conns)] def script_repr(self,imports=[],prefix=" "): """ Return a nearly runnable script recreating this simulation. Needs some work to make the result truly runnable. Only scheduled commands that have not yet been executed are included, because executed commands are not kept around. """ objs = [o.script_repr(imports=imports) for o in sorted(self.objects().values(), cmp=lambda x, y: cmp(x.name,y.name))] # CBENHANCEMENT: could allow user to plug in a sorting # function. E.g. might want to compare conns based on name # then dest then src if lots of conns share the same name (so # the order is always the same). conns = [o.script_repr(imports=imports) for o in sorted(self.connections(), cmp=lambda x, y: cmp(x.name,y.name))] cmds = [o.script_repr(imports=imports) for o in sorted(sorted([e for e in self.events if isinstance(e,CommandEvent)], cmp=lambda x, y: cmp(x.command_string,y.command_string)), cmp=lambda x, y: cmp(x.time,y.time))] # CEBALERT: hack to support importing the time type since the # scheduled actions will have times printed using the # time_type. imports.append("from %s import %s"%(self.time_type.__module__,self.time_type.__name__)) imps = sorted(set(imports)) vals = [_simulation_path + "." 
+ p + "=" + repr(getattr(self,p)) for p in ["name","startup_commands"] if getattr(self,p)] return "\n\n# Imports:\n\n" + '\n'.join(imps) + \ "\n\n\n" + '\n\n'.join(vals) + \ '\n\n\n\n# Objects:\n\n' + '\n\n\n'.join(objs) + \ '\n\n\n\n# Connections:\n\n' + '\n\n\n'.join(conns) + \ '\n\n\n\n# Scheduled commands:\n\n' + '\n'.join(cmds) # Convenience function for use in graphical editors of the simulation def grid_layout(self,objgrid,xstart=100,xstep=150,ystart=100,ystep=150,item_scale=1.0): """ Set the layout_location of simulation objects in a grid pattern. Takes a list of lists of simulation objects, or names of simulation objects, and positions them with layout_locations left-to-right, top-to-bottom, starting at (xstart,ystart) and advancing by xstep and ystep. The object None can be placed in the grid to skip a grid space. """ self.item_scale=item_scale y = ystart for row in objgrid: x = xstart for obj in row: if obj: if isinstance(obj,str): self[obj].layout_location = x,y else: obj.layout_location = x,y x += xstep y += ystep class RealTimeSimulation(Simulation): """ A (quasi) real-time simulation object. This subclass of Simulation attempts to maintain a correspondence between simulation time and real time, as defined by the timescale parameter. Real time simulation instances still maintain a nominal, discrete simulation time that determines the order of event delivery. At the beginning of each simulation time epoch, the simulation marks the actual wall clock time. After event delivery for that epoch has ended, the simulation calculates the amount of computation time used for event processing, and executes a real sleep for the remainder of the epoch. If the computation time for the epoch exceeded the real time, a warning is issued and processing proceeds immediately to the next simulation time epoch. 
RUN HOOKS The simulation includes as parameters two lists of functions/callables, run_start_hooks and run_stop_hooks, that will be called immediately before and after event processing during a call to .run(). This allows, for example, starting and stopping of real-time devices that might use resources while the simulation is not running. """ timescale = param.Number(default=1.0,bounds=(0,None),doc=""" The desired real length of one simulation time unit, in milliseconds.""") run_start_hooks = param.HookList(default=[],doc=""" A list of callable objects to be called on entry to .run(), before any events are processed.""") run_stop_hooks = param.HookList(default=[],doc=""" A list of callable objects to be called on exit from .run() after all events are processed.""") def __init__(self,**params): super(RealTimeSimulation,self).__init__(**params) self._real_timestamp = 0.0 def run(self,*args,**kw): for h in self.run_start_hooks: h() self._real_timestamp = self.real_time() super(RealTimeSimulation,self).run(*args,**kw) for h in self.run_stop_hooks: h() def real_time(self): return time.time() * 1000 def sleep(self,delay): """ Sleep for the number of real milliseconds seconds corresponding to the given delay, subtracting off the amount of time elapsed since the last sleep. """ sleep_ms = delay*self.timescale-(self.real_time()-self._real_timestamp) if sleep_ms < 0: self.warning("Realtime fault. Sleep delay of %f requires realtime sleep of %.2f ms." %(delay,sleep_ms)) else: self.debug("sleeping. delay =",delay,"real delay =",sleep_ms,"ms.") time.sleep(sleep_ms/1000.0) self._real_timestamp = self.real_time() self._time += delay
ioam/svn-history
topo/base/simulation.py
Python
bsd-3-clause
64,355
[ "NEURON" ]
9e785f4aad6ed667e63d89395128e0d378fe2a3985c60b1b612196c2031a3e66
#!/usr/bin/env python # coding: utf-8 # # Neural networks # # Artificial neural networks are computational systems that can learn to # perform tasks by considering examples, generally without being # programmed with any task-specific rules. It is supposed to mimic a # biological system, wherein neurons interact by sending signals in the # form of mathematical functions between layers. All layers can contain # an arbitrary number of neurons, and each connection is represented by # a weight variable. # # # The field of artificial neural networks has a long history of # development, and is closely connected with the advancement of computer # science and computers in general. A model of artificial neurons was # first developed by McCulloch and Pitts in 1943 to study signal # processing in the brain and has later been refined by others. The # general idea is to mimic neural networks in the human brain, which is # composed of billions of neurons that communicate with each other by # sending electrical signals. Each neuron accumulates its incoming # signals, which must exceed an activation threshold to yield an # output. If the threshold is not overcome, the neuron remains inactive, # i.e. has zero output. # # This behaviour has inspired a simple mathematical model for an artificial neuron. # <!-- Equation labels as ordinary links --> # <div id="artificialNeuron"></div> # # $$ # \begin{equation} # y = f\left(\sum_{i=1}^n w_ix_i\right) = f(u) # \label{artificialNeuron} \tag{1} # \end{equation} # $$ # Here, the output $y$ of the neuron is the value of its activation function, which have as input # a weighted sum of signals $x_i, \dots ,x_n$ received by $n$ other neurons. # # Conceptually, it is helpful to divide neural networks into four # categories: # 1. general purpose neural networks for supervised learning, # # 2. neural networks designed specifically for image processing, the most prominent example of this class being Convolutional Neural Networks (CNNs), # # 3. 
neural networks for sequential data such as Recurrent Neural Networks (RNNs), and # # 4. neural networks for unsupervised learning such as Deep Boltzmann Machines. # # In natural science, DNNs and CNNs have already found numerous # applications. In statistical physics, they have been applied to detect # phase transitions in 2D Ising and Potts models, lattice gauge # theories, and different phases of polymers, or solving the # Navier-Stokes equation in weather forecasting. Deep learning has also # found interesting applications in quantum physics. Various quantum # phase transitions can be detected and studied using DNNs and CNNs, # topological phases, and even non-equilibrium many-body # localization. Representing quantum states as DNNs quantum state # tomography are among some of the impressive achievements to reveal the # potential of DNNs to facilitate the study of quantum systems. # # In quantum information theory, it has been shown that one can perform # gate decompositions with the help of neural. # # The applications are not limited to the natural sciences. There is a # plethora of applications in essentially all disciplines, from the # humanities to life science and medicine. # # # An artificial neural network (ANN), is a computational model that # consists of layers of connected neurons, or nodes or units. We will # refer to these interchangeably as units or nodes, and sometimes as # neurons. # # It is supposed to mimic a biological nervous system by letting each # neuron interact with other neurons by sending signals in the form of # mathematical functions between layers. A wide variety of different # ANNs have been developed, but most of them consist of an input layer, # an output layer and eventual layers in-between, called *hidden # layers*. All layers can contain an arbitrary number of nodes, and each # connection between two nodes is associated with a weight variable. 
# # Neural networks (also called neural nets) are neural-inspired # nonlinear models for supervised learning. As we will see, neural nets # can be viewed as natural, more powerful extensions of supervised # learning methods such as linear and logistic regression and soft-max # methods we discussed earlier. # # # ### Feed-forward neural networks # # The feed-forward neural network (FFNN) was the first and simplest type # of ANNs that were devised. In this network, the information moves in # only one direction: forward through the layers. # # Nodes are represented by circles, while the arrows display the # connections between the nodes, including the direction of information # flow. Additionally, each arrow corresponds to a weight variable # (figure to come). We observe that each node in a layer is connected # to *all* nodes in the subsequent layer, making this a so-called # *fully-connected* FFNN. # # # # ### Convolutional Neural Network # # A different variant of FFNNs are *convolutional neural networks* # (CNNs), which have a connectivity pattern inspired by the animal # visual cortex. Individual neurons in the visual cortex only respond to # stimuli from small sub-regions of the visual field, called a receptive # field. This makes the neurons well-suited to exploit the strong # spatially local correlation present in natural images. The response of # each neuron can be approximated mathematically as a convolution # operation. (figure to come) # # Convolutional neural networks emulate the behaviour of neurons in the # visual cortex by enforcing a *local* connectivity pattern between # nodes of adjacent layers: Each node in a convolutional layer is # connected only to a subset of the nodes in the previous layer, in # contrast to the fully-connected FFNN. Often, CNNs consist of several # convolutional layers that learn local features of the input, with a # fully-connected layer at the end, which gathers all the local data and # produces the outputs. 
They have wide applications in image and video # recognition. # # ### Recurrent neural networks # # So far we have only mentioned ANNs where information flows in one # direction: forward. *Recurrent neural networks* on the other hand, # have connections between nodes that form directed *cycles*. This # creates a form of internal memory which are able to capture # information on what has been calculated before; the output is # dependent on the previous computations. Recurrent NNs make use of # sequential information by performing the same task for every element # in a sequence, where each element depends on previous elements. An # example of such information is sentences, making recurrent NNs # especially well-suited for handwriting and speech recognition. # # ### Other types of networks # # There are many other kinds of ANNs that have been developed. One type # that is specifically designed for interpolation in multidimensional # space is the radial basis function (RBF) network. RBFs are typically # made up of three layers: an input layer, a hidden layer with # non-linear radial symmetric activation functions and a linear output # layer (''linear'' here means that each node in the output layer has a # linear activation function). The layers are normally fully-connected # and there are no cycles, thus RBFs can be viewed as a type of # fully-connected FFNN. They are however usually treated as a separate # type of NN due the unusual activation functions. # # # ## Multilayer perceptrons # # One uses often so-called fully-connected feed-forward neural networks # with three or more layers (an input layer, one or more hidden layers # and an output layer) consisting of neurons that have non-linear # activation functions. # # Such networks are often called *multilayer perceptrons* (MLPs). 
# # # According to the *Universal approximation theorem*, a feed-forward # neural network with just a single hidden layer containing a finite # number of neurons can approximate a continuous multidimensional # function to arbitrary accuracy, assuming the activation function for # the hidden layer is a **non-constant, bounded and # monotonically-increasing continuous function**. # # Note that the requirements on the activation function only applies to # the hidden layer, the output nodes are always assumed to be linear, so # as to not restrict the range of output values. # # # # The output $y$ is produced via the activation function $f$ # $$ # y = f\left(\sum_{i=1}^n w_ix_i + b_i\right) = f(z), # $$ # This function receives $x_i$ as inputs. # Here the activation $z=(\sum_{i=1}^n w_ix_i+b_i)$. # In an FFNN of such neurons, the *inputs* $x_i$ are the *outputs* of # the neurons in the preceding layer. Furthermore, an MLP is # fully-connected, which means that each neuron receives a weighted sum # of the outputs of *all* neurons in the previous layer. # # # First, for each node $i$ in the first hidden layer, we calculate a weighted sum $z_i^1$ of the input coordinates $x_j$, # <!-- Equation labels as ordinary links --> # <div id="_auto1"></div> # # $$ # \begin{equation} z_i^1 = \sum_{j=1}^{M} w_{ij}^1 x_j + b_i^1 # \label{_auto1} \tag{2} # \end{equation} # $$ # Here $b_i$ is the so-called bias which is normally needed in # case of zero activation weights or inputs. How to fix the biases and # the weights will be discussed below. The value of $z_i^1$ is the # argument to the activation function $f_i$ of each node $i$, The # variable $M$ stands for all possible inputs to a given node $i$ in the # first layer. 
We define the output $y_i^1$ of all neurons in layer 1 as # <!-- Equation labels as ordinary links --> # <div id="outputLayer1"></div> # # $$ # \begin{equation} # y_i^1 = f(z_i^1) = f\left(\sum_{j=1}^M w_{ij}^1 x_j + b_i^1\right) # \label{outputLayer1} \tag{3} # \end{equation} # $$ # where we assume that all nodes in the same layer have identical # activation functions, hence the notation $f$. In general, we could assume in the more general case that different layers have different activation functions. # In this case we would identify these functions with a superscript $l$ for the $l$-th layer, # <!-- Equation labels as ordinary links --> # <div id="generalLayer"></div> # # $$ # \begin{equation} # y_i^l = f^l(u_i^l) = f^l\left(\sum_{j=1}^{N_{l-1}} w_{ij}^l y_j^{l-1} + b_i^l\right) # \label{generalLayer} \tag{4} # \end{equation} # $$ # where $N_l$ is the number of nodes in layer $l$. When the output of # all the nodes in the first hidden layer are computed, the values of # the subsequent layer can be calculated and so forth until the output # is obtained. # # # # # The output of neuron $i$ in layer 2 is thus, # <!-- Equation labels as ordinary links --> # <div id="_auto2"></div> # # $$ # \begin{equation} # y_i^2 = f^2\left(\sum_{j=1}^N w_{ij}^2 y_j^1 + b_i^2\right) # \label{_auto2} \tag{5} # \end{equation} # $$ # <!-- Equation labels as ordinary links --> # <div id="outputLayer2"></div> # # $$ # \begin{equation} # = f^2\left[\sum_{j=1}^N w_{ij}^2f^1\left(\sum_{k=1}^M w_{jk}^1 x_k + b_j^1\right) + b_i^2\right] # \label{outputLayer2} \tag{6} # \end{equation} # $$ # where we have substituted $y_k^1$ with the inputs $x_k$. 
Finally, the ANN output reads # <!-- Equation labels as ordinary links --> # <div id="_auto3"></div> # # $$ # \begin{equation} # y_i^3 = f^3\left(\sum_{j=1}^N w_{ij}^3 y_j^2 + b_i^3\right) # \label{_auto3} \tag{7} # \end{equation} # $$ # <!-- Equation labels as ordinary links --> # <div id="_auto4"></div> # # $$ # \begin{equation} # = f_3\left[\sum_{j} w_{ij}^3 f^2\left(\sum_{k} w_{jk}^2 f^1\left(\sum_{m} w_{km}^1 x_m + b_k^1\right) + b_j^2\right) # + b_1^3\right] # \label{_auto4} \tag{8} # \end{equation} # $$ # We can generalize this expression to an MLP with $l$ hidden # layers. The complete functional form is, # <!-- Equation labels as ordinary links --> # <div id="completeNN"></div> # # $$ # \begin{equation} # y^{l+1}_i = f^{l+1}\left[\!\sum_{j=1}^{N_l} w_{ij}^3 f^l\left(\sum_{k=1}^{N_{l-1}}w_{jk}^{l-1}\left(\dots f^1\left(\sum_{n=1}^{N_0} w_{mn}^1 x_n+ b_m^1\right)\dots\right)+b_k^2\right)+b_1^3\right] # \label{completeNN} \tag{9} # \end{equation} # $$ # which illustrates a basic property of MLPs: The only independent # variables are the input values $x_n$. # # # This confirms that an MLP, despite its quite convoluted mathematical # form, is nothing more than an analytic function, specifically a # mapping of real-valued vectors $\hat{x} \in \mathbb{R}^n \rightarrow # \hat{y} \in \mathbb{R}^m$. # # Furthermore, the flexibility and universality of an MLP can be # illustrated by realizing that the expression is essentially a nested # sum of scaled activation functions of the form # <!-- Equation labels as ordinary links --> # <div id="_auto5"></div> # # $$ # \begin{equation} # f(x) = c_1 f(c_2 x + c_3) + c_4 # \label{_auto5} \tag{10} # \end{equation} # $$ # where the parameters $c_i$ are weights and biases. By adjusting these # parameters, the activation functions can be shifted up and down or # left and right, change slope or be rescaled which is the key to the # flexibility of a neural network. 
# # # We can introduce a more convenient notation for the activations in an A NN. # # Additionally, we can represent the biases and activations # as layer-wise column vectors $\hat{b}_l$ and $\hat{y}_l$, so that the $i$-th element of each vector # is the bias $b_i^l$ and activation $y_i^l$ of node $i$ in layer $l$ respectively. # # We have that $\mathrm{W}_l$ is an $N_{l-1} \times N_l$ matrix, while $\hat{b}_l$ and $\hat{y}_l$ are $N_l \times 1$ column vectors. # With this notation, the sum becomes a matrix-vector multiplication, and we can write # the equation for the activations of hidden layer 2 (assuming three nodes for simplicity) as # <!-- Equation labels as ordinary links --> # <div id="_auto6"></div> # # $$ # \begin{equation} # \hat{y}_2 = f_2(\mathrm{W}_2 \hat{y}_{1} + \hat{b}_{2}) = # f_2\left(\left[\begin{array}{ccc} # w^2_{11} &w^2_{12} &w^2_{13} \\ # w^2_{21} &w^2_{22} &w^2_{23} \\ # w^2_{31} &w^2_{32} &w^2_{33} \\ # \end{array} \right] \cdot # \left[\begin{array}{c} # y^1_1 \\ # y^1_2 \\ # y^1_3 \\ # \end{array}\right] + # \left[\begin{array}{c} # b^2_1 \\ # b^2_2 \\ # b^2_3 \\ # \end{array}\right]\right). # \label{_auto6} \tag{11} # \end{equation} # $$ # ### Matrix-vector notation and activation # # The activation of node $i$ in layer 2 is # <!-- Equation labels as ordinary links --> # <div id="_auto7"></div> # # $$ # \begin{equation} # y^2_i = f_2\Bigr(w^2_{i1}y^1_1 + w^2_{i2}y^1_2 + w^2_{i3}y^1_3 + b^2_i\Bigr) = # f_2\left(\sum_{j=1}^3 w^2_{ij} y_j^1 + b^2_i\right). # \label{_auto7} \tag{12} # \end{equation} # $$ # This is not just a convenient and compact notation, but also a useful # and intuitive way to think about MLPs: The output is calculated by a # series of matrix-vector multiplications and vector additions that are # used as input to the activation functions. For each operation # $\mathrm{W}_l \hat{y}_{l-1}$ we move forward one layer. 
# # # # ### Activation functions # # A property that characterizes a neural network, other than its # connectivity, is the choice of activation function(s). As described # in, the following restrictions are imposed on an activation function # for a FFNN to fulfill the universal approximation theorem # # * Non-constant # # * Bounded # # * Monotonically-increasing # # * Continuous # # The second requirement excludes all linear functions. Furthermore, in # a MLP with only linear activation functions, each layer simply # performs a linear transformation of its inputs. # # Regardless of the number of layers, the output of the NN will be # nothing but a linear function of the inputs. Thus we need to introduce # some kind of non-linearity to the NN to be able to fit non-linear # functions Typical examples are the logistic *Sigmoid* # $$ # f(x) = \frac{1}{1 + e^{-x}}, # $$ # and the *hyperbolic tangent* function # $$ # f(x) = \tanh(x) # $$ # The *sigmoid* function are more biologically plausible because the # output of inactive neurons are zero. Such activation function are # called *one-sided*. However, it has been shown that the hyperbolic # tangent performs better than the sigmoid for training MLPs. has # become the most popular for *deep neural networks* # In[1]: get_ipython().run_line_magic('matplotlib', 'inline') """The sigmoid function (or the logistic curve) is a function that takes any real number, z, and outputs a number (0,1). It is useful in neural networks for assigning weights on a relative scale. 
The value z is the weighted sum of parameters involved in the learning algorithm.""" import numpy import matplotlib.pyplot as plt import math as mt z = numpy.arange(-5, 5, .1) sigma_fn = numpy.vectorize(lambda z: 1/(1+numpy.exp(-z))) sigma = sigma_fn(z) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(z, sigma) ax.set_ylim([-0.1, 1.1]) ax.set_xlim([-5,5]) ax.grid(True) ax.set_xlabel('z') ax.set_title('sigmoid function') plt.show() """Step Function""" z = numpy.arange(-5, 5, .02) step_fn = numpy.vectorize(lambda z: 1.0 if z >= 0.0 else 0.0) step = step_fn(z) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(z, step) ax.set_ylim([-0.5, 1.5]) ax.set_xlim([-5,5]) ax.grid(True) ax.set_xlabel('z') ax.set_title('step function') plt.show() """Sine Function""" z = numpy.arange(-2*mt.pi, 2*mt.pi, 0.1) t = numpy.sin(z) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(z, t) ax.set_ylim([-1.0, 1.0]) ax.set_xlim([-2*mt.pi,2*mt.pi]) ax.grid(True) ax.set_xlabel('z') ax.set_title('sine function') plt.show() """Plots a graph of the squashing function used by a rectified linear unit""" z = numpy.arange(-2, 2, .1) zero = numpy.zeros(len(z)) y = numpy.max([zero, z], axis=0) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(z, y) ax.set_ylim([-2.0, 2.0]) ax.set_xlim([-2.0, 2.0]) ax.grid(True) ax.set_xlabel('z') ax.set_title('Rectified linear unit') plt.show() # ## The multilayer perceptron (MLP) # # The multilayer perceptron is a very popular, and easy to implement approach, to deep learning. It consists of # 1. A neural network with one or more layers of nodes between the input and the output nodes. # # 2. The multilayer network structure, or architecture, or topology, consists of an input layer, one or more hidden layers, and one output layer. # # 3. The input nodes pass values to the first hidden layer, its nodes pass the information on to the second and so on till we reach the output layer. 
# # As a convention it is normal to call a network with one layer of input units, one layer of hidden # units and one layer of output units as a two-layer network. A network with two layers of hidden units is called a three-layer network etc etc. # # For an MLP network there is no direct connection between the output nodes/neurons/units and the input nodes/neurons/units. # Hereafter we will call the various entities of a layer for nodes. # There are also no connections within a single layer. # # The number of input nodes does not need to equal the number of output # nodes. This applies also to the hidden layers. Each layer may have its # own number of nodes and activation functions. # # The hidden layers have their name from the fact that they are not # linked to observables and as we will see below when we define the # so-called activation $\hat{z}$, we can think of this as a basis # expansion of the original inputs $\hat{x}$. The difference however # between neural networks and say linear regression is that now these # basis functions (which will correspond to the weights in the network) # are learned from data. This results in an important difference between # neural networks and deep learning approaches on one side and methods # like logistic regression or linear regression and their modifications on the other side. # # # ### From one to many layers, the universal approximation theorem # # A neural network with only one layer, what we called the simple # perceptron, is best suited if we have a standard binary model with # clear (linear) boundaries between the outcomes. As such it could # equally well be replaced by standard linear regression or logistic # regression. Networks with one or more hidden layers approximate # systems with more complex boundaries. 
# # As stated earlier, # an important theorem in studies of neural networks, restated without # proof here, is the [universal approximation # theorem](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.441.7873&rep=rep1&type=pdf). # # It states that a feed-forward network with a single hidden layer # containing a finite number of neurons can approximate continuous # functions on compact subsets of real functions. The theorem thus # states that simple neural networks can represent a wide variety of # interesting functions when given appropriate parameters. It is the # multilayer feedforward architecture itself which gives neural networks # the potential of being universal approximators. # # # # ## Deriving the back propagation code for a multilayer perceptron model # # # # As we have seen now in a feed forward network, we can express the final output of our network in terms of basic matrix-vector multiplications. # The unknowwn quantities are our weights $w_{ij}$ and we need to find an algorithm for changing them so that our errors are as small as possible. # This leads us to the famous [back propagation algorithm](https://www.nature.com/articles/323533a0). # # The questions we want to ask are how do changes in the biases and the # weights in our network change the cost function and how can we use the # final output to modify the weights? # # To derive these equations let us start with a plain regression problem # and define our cost function as # $$ # {\cal C}(\hat{W}) = \frac{1}{2}\sum_{i=1}^n\left(y_i - t_i\right)^2, # $$ # where the $t_i$s are our $n$ targets (the values we want to # reproduce), while the outputs of the network after having propagated # all inputs $\hat{x}$ are given by $y_i$. Below we will demonstrate # how the basic equations arising from the back propagation algorithm # can be modified in order to study classification problems with $K$ # classes. 
# # # With our definition of the targets $\hat{t}$, the outputs of the # network $\hat{y}$ and the inputs $\hat{x}$ we # define now the activation $z_j^l$ of node/neuron/unit $j$ of the # $l$-th layer as a function of the bias, the weights which add up from # the previous layer $l-1$ and the forward passes/outputs # $\hat{a}^{l-1}$ from the previous layer as # $$ # z_j^l = \sum_{i=1}^{M_{l-1}}w_{ij}^la_i^{l-1}+b_j^l, # $$ # where $b_k^l$ are the biases from layer $l$. Here $M_{l-1}$ # represents the total number of nodes/neurons/units of layer $l-1$. The # figure here illustrates this equation. We can rewrite this in a more # compact form as the matrix-vector products we discussed earlier, # $$ # \hat{z}^l = \left(\hat{W}^l\right)^T\hat{a}^{l-1}+\hat{b}^l. # $$ # With the activation values $\hat{z}^l$ we can in turn define the # output of layer $l$ as $\hat{a}^l = f(\hat{z}^l)$ where $f$ is our # activation function. In the examples here we will use the sigmoid # function discussed in our logistic regression lectures. We will also use the same activation function $f$ for all layers # and their nodes. It means we have # $$ # a_j^l = f(z_j^l) = \frac{1}{1+\exp{-(z_j^l)}}. # $$ # ### Derivatives and the chain rule # # From the definition of the activation $z_j^l$ we have # $$ # \frac{\partial z_j^l}{\partial w_{ij}^l} = a_i^{l-1}, # $$ # and # $$ # \frac{\partial z_j^l}{\partial a_i^{l-1}} = w_{ji}^l. # $$ # With our definition of the activation function we have that (note that this function depends only on $z_j^l$) # $$ # \frac{\partial a_j^l}{\partial z_j^{l}} = a_j^l(1-a_j^l)=f(z_j^l)(1-f(z_j^l)). # $$ # With these definitions we can now compute the derivative of the cost function in terms of the weights. # # Let us specialize to the output layer $l=L$. 
Our cost function is # $$ # {\cal C}(\hat{W^L}) = \frac{1}{2}\sum_{i=1}^n\left(y_i - t_i\right)^2=\frac{1}{2}\sum_{i=1}^n\left(a_i^L - t_i\right)^2, # $$ # The derivative of this function with respect to the weights is # $$ # \frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \left(a_j^L - t_j\right)\frac{\partial a_j^L}{\partial w_{jk}^{L}}, # $$ # The last partial derivative can easily be computed and reads (by applying the chain rule) # $$ # \frac{\partial a_j^L}{\partial w_{jk}^{L}} = \frac{\partial a_j^L}{\partial z_{j}^{L}}\frac{\partial z_j^L}{\partial w_{jk}^{L}}=a_j^L(1-a_j^L)a_k^{L-1}, # $$ # ### Bringing it together, first back propagation equation # # We have thus # $$ # \frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \left(a_j^L - t_j\right)a_j^L(1-a_j^L)a_k^{L-1}, # $$ # Defining # $$ # \delta_j^L = a_j^L(1-a_j^L)\left(a_j^L - t_j\right) = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)}, # $$ # and using the Hadamard product of two vectors we can write this as # $$ # \hat{\delta}^L = f'(\hat{z}^L)\circ\frac{\partial {\cal C}}{\partial (\hat{a}^L)}. # $$ # This is an important expression. The second term on the right handside # measures how fast the cost function is changing as a function of the $j$th # output activation. If, for example, the cost function doesn't depend # much on a particular output node $j$, then $\delta_j^L$ will be small, # which is what we would expect. The first term on the right, measures # how fast the activation function $f$ is changing at a given activation # value $z_j^L$. # # Notice that everything in the above equations is easily computed. In # particular, we compute $z_j^L$ while computing the behaviour of the # network, and it is only a small additional overhead to compute # $f'(z^L_j)$. The exact form of the derivative with respect to the # output depends on the form of the cost function. 
# However, provided the cost function is known there should be little # trouble in calculating # $$ # \frac{\partial {\cal C}}{\partial (a_j^L)} # $$ # With the definition of $\delta_j^L$ we have a more compact definition of the derivative of the cost function in terms of the weights, namely # $$ # \frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \delta_j^La_k^{L-1}. # $$ # It is also easy to see that our previous equation can be written as # $$ # \delta_j^L =\frac{\partial {\cal C}}{\partial z_j^L}= \frac{\partial {\cal C}}{\partial a_j^L}\frac{\partial a_j^L}{\partial z_j^L}, # $$ # which can also be interpreted as the partial derivative of the cost function with respect to the biases $b_j^L$, namely # $$ # \delta_j^L = \frac{\partial {\cal C}}{\partial b_j^L}\frac{\partial b_j^L}{\partial z_j^L}=\frac{\partial {\cal C}}{\partial b_j^L}, # $$ # That is, the error $\delta_j^L$ is exactly equal to the rate of change of the cost function as a function of the bias. # # We have now three equations that are essential for the computations of the derivatives of the cost function at the output layer. 
These equations are needed to start the algorithm and they are # # **The starting equations.** # <!-- Equation labels as ordinary links --> # <div id="_auto8"></div> # # $$ # \begin{equation} # \frac{\partial{\cal C}(\hat{W^L})}{\partial w_{jk}^L} = \delta_j^La_k^{L-1}, # \label{_auto8} \tag{13} # \end{equation} # $$ # and # <!-- Equation labels as ordinary links --> # <div id="_auto9"></div> # # $$ # \begin{equation} # \delta_j^L = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)}, # \label{_auto9} \tag{14} # \end{equation} # $$ # and # <!-- Equation labels as ordinary links --> # <div id="_auto10"></div> # # $$ # \begin{equation} # \delta_j^L = \frac{\partial {\cal C}}{\partial b_j^L}, # \label{_auto10} \tag{15} # \end{equation} # $$ # An interesting consequence of the above equations is that when the # activation $a_k^{L-1}$ is small, the gradient term, that is the # derivative of the cost function with respect to the weights, will also # tend to be small. We say then that the weight learns slowly, meaning # that it changes slowly when we minimize the weights via say gradient # descent. In this case we say the system learns slowly. # # Another interesting feature is that is when the activation function, # represented by the sigmoid function here, is rather flat when we move towards # its end values $0$ and $1$ (see the above Python codes). In these # cases, the derivatives of the activation function will also be close # to zero, meaning again that the gradients will be small and the # network learns slowly again. # # # # We need a fourth equation and we are set. We are going to propagate # backwards in order to the determine the weights and biases. In order # to do so we need to represent the error in the layer before the final # one $L-1$ in terms of the errors in the final output layer. # # ### Final back propagating equation # # We have that (replacing $L$ with a general layer $l$) # $$ # \delta_j^l =\frac{\partial {\cal C}}{\partial z_j^l}. 
# $$ # We want to express this in terms of the equations for layer $l+1$. Using the chain rule and summing over all $k$ entries we have # $$ # \delta_j^l =\sum_k \frac{\partial {\cal C}}{\partial z_k^{l+1}}\frac{\partial z_k^{l+1}}{\partial z_j^{l}}=\sum_k \delta_k^{l+1}\frac{\partial z_k^{l+1}}{\partial z_j^{l}}, # $$ # and recalling that # $$ # z_j^{l+1} = \sum_{i=1}^{M_{l}}w_{ij}^{l+1}a_i^{l}+b_j^{l+1}, # $$ # with $M_l$ being the number of nodes in layer $l$, we obtain # $$ # \delta_j^l =\sum_k \delta_k^{l+1}w_{kj}^{l+1}f'(z_j^l), # $$ # This is our final equation. # # We are now ready to set up the algorithm for back propagation and learning the weights and biases. # # # ### Setting up the Back propagation algorithm # # The four equations provide us with a way of computing the gradient of the cost function. Let us write this out in the form of an algorithm. # # First, we set up the input data $\hat{x}$ and the activations # $\hat{z}_1$ of the input layer and compute the activation function and # the pertinent outputs $\hat{a}^1$. # # # # Secondly, we perform then the feed forward till we reach the output # layer and compute all $\hat{z}_l$ of the input layer and compute the # activation function and the pertinent outputs $\hat{a}^l$ for # $l=2,3,\dots,L$. # # # # Thereafter we compute the ouput error $\hat{\delta}^L$ by computing all # $$ # \delta_j^L = f'(z_j^L)\frac{\partial {\cal C}}{\partial (a_j^L)}. # $$ # Then we compute the back propagate error for each $l=L-1,L-2,\dots,2$ as # $$ # \delta_j^l = \sum_k \delta_k^{l+1}w_{kj}^{l+1}f'(z_j^l). 
# $$

# Finally, for each $l=L-1,L-2,\dots,2$ we update the weights and the biases using gradient descent, according to the rules

# $$
# w_{jk}^l \leftarrow  w_{jk}^l- \eta \delta_j^la_k^{l-1},
# $$

# $$
# b_j^l \leftarrow  b_j^l-\eta \frac{\partial {\cal C}}{\partial b_j^l}=b_j^l-\eta \delta_j^l.
# $$

# The parameter $\eta$ is the learning parameter discussed in connection with the gradient descent methods.
# Here it is convenient to use stochastic gradient descent (see the examples below) with mini-batches, with an outer loop that steps through multiple epochs of training.
CompPhysics/MachineLearning
doc/LectureNotes/_build/jupyter_execute/chapter9.py
Python
cc0-1.0
31,431
[ "NEURON" ]
765ad384dc01f1549e50702f979e46c5ad3d964c41a39622054eef37063bb237
#
# (C) Copyright 2001/2002 Kai Sterker <kaisterker@linuxgames.com>
# Part of the Adonthell Project http://adonthell.linuxgames.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY.
#
# See the COPYING file for more details
#
# -- Movement schedule for Oliver Redwyne
#
#    He'll walk around in the stable and yard.
#    When summoned by Orloth, he'll show the player to his room

import adonthell
import schedule
import random

# -- gettext-style marker: returns the message unchanged; it only tags the
#    strings below so translation tools can pick them up
def _(message): return message

class oliver (schedule.speak):
    """Movement schedule for Oliver Redwyne, the stable boy.

    Extends schedule.speak, so Oliver makes random remarks while the
    schedule moves him between the common room and the barn.
    """

    def __init__ (self, mapcharacterinstance):
        # -- the engine-side mapcharacter this schedule controls
        self.myself = mapcharacterinstance

        # -- make random remarks
        self.speech = [_("It's so exciting. An Elven Lady, here at Waste's Edge!"), \
            _("I gotta hurry before mother complains again."), \
            _("Why can't I have a little dog!?")]

        # -- (min, max) delay between remarks; engine time units -- TODO confirm
        self.speech_delay = (20, 40)

        schedule.speak.__init__(self)

        # -- the tiles around Orloth
        self.offsets = [(1,1),(1,-1),(-1,1),(-1,-1),(1,0),(0,1),(-1,0),(0,-1)]

        # -- have the engine call goal_reached whenever a set_goal target is hit
        self.myself.set_callback (self.goal_reached)

    # -- summoned to the common room to show the player to his room
    def goto_common_room (self):
        """Teleport to the common room, then walk to a free tile next to Orloth."""
        # -- beam directly to common room, as it is faster that way
        self.myself.jump_to (1, 13, 7, adonthell.STAND_NORTH)

        # -- find a free spot near Orloth and the player
        orloth = adonthell.gamedata_get_character ("Orloth Redwyne")

        # -- try each neighbouring offset until set_goal accepts one
        for (x, y) in self.offsets:
            x = x + orloth.posx ()
            y = y + orloth.posy ()

            if self.myself.set_goal (x, y): break

    # -- leave the player's room and goto the barn
    def goto_barn (self):
        """Walk one leg of the route to the barn, chosen by the current submap."""
        location = self.myself.submap ()

        # -- Player's room
        if location == 12: self.myself.set_goal (5, 1)
        # -- First floor
        elif location == 9: self.myself.set_goal (8, 1)
        # -- Second floor (this shouldn't happen, but it once did ...)
        elif location == 14: self.myself.set_goal (4, 1)
        # -- Common Room
        elif location == 1: self.myself.set_goal (13, 8)
        # -- Yard, our final goal (for now)
        elif location == 0:
            self.myself.set_goal (25, 15)
            # -- NOTE(review): cleared via plain attribute assignment, yet
            #    goal_reached reads it through get_val ("goto_barn") --
            #    confirm the mapcharacter API maps attributes onto get_val
            self.myself.goto_barn = 0

    def goal_reached (self):
        """Engine callback fired each time a movement goal is reached."""
        if self.myself.get_val ("goto_barn") == 1:
            self.goto_barn ()
ksterker/wastesedge
scripts/schedules/mapcharacters/oliver.py
Python
gpl-2.0
2,646
[ "exciting" ]
3ed3a4e150069a30141ad4e26190fa92ca9c3c6d913cdf47719771828acfa2a5
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Baseline TSP solver: visit the nodes in input order and report the tour length."""

import math
import sys
from collections import namedtuple

# A 2-D city coordinate.
Point = namedtuple("Point", ['x', 'y'])


def length(point1, point2):
    """Return the Euclidean distance between two Points.

    math.hypot computes the same value as sqrt(dx**2 + dy**2) but is
    robust against intermediate overflow/underflow.
    """
    return math.hypot(point1.x - point2.x, point1.y - point2.y)


def solve_it(input_data):
    """Solve (trivially) one TSP instance.

    Parameters
    ----------
    input_data : str
        First line: node count n.  Next n lines: "x y" coordinates.

    Returns
    -------
    str
        Two lines: "<tour length> 0" (0 = optimality not proven) followed
        by the space-separated visit order.
    """
    lines = input_data.split('\n')
    node_count = int(lines[0])

    # Parse one Point per coordinate line.
    points = [Point(float(parts[0]), float(parts[1]))
              for parts in (lines[i].split() for i in range(1, node_count + 1))]

    # Trivial solution: visit the nodes in the order they appear in the file.
    solution = list(range(node_count))

    # Tour length; index -1 pairs the last node with the first, closing the
    # loop (also makes the empty instance return 0.0 instead of crashing).
    obj = sum(length(points[solution[index - 1]], points[solution[index]])
              for index in range(node_count))

    # Prepare the solution in the specified output format.
    output_data = '%.2f' % obj + ' ' + str(0) + '\n'
    output_data += ' '.join(map(str, solution))

    return output_data


if __name__ == '__main__':
    # NOTE: sys was previously imported twice (module level and again inside
    # this guard); the single top-level import suffices.
    if len(sys.argv) > 1:
        file_location = sys.argv[1].strip()
        with open(file_location, 'r') as input_data_file:
            input_data = input_data_file.read()
        print(solve_it(input_data))
    else:
        print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/tsp_51_1)')
discreteoptimization/assignment
tsp/solver.py
Python
mit
1,515
[ "VisIt" ]
aa6e110abe73b9eb80133862d28b6f753f54791615f833c8efadd090d1a96ebc
# geotecha - A software suite for geotechncial engineering # Copyright (C) 2018 Rohan T. Walker (rtrwalker@gmail.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/gpl.html. """Quick and dirty code to generate some results for testing against geotecha-speccon-integrals-dim1sin type funcitons. Generally tests by: - 2 layers const within a layer - prop = 1 between [0,0.4] and prop = 2 between [0.4, 1]. - 1 layer linear within layer - prop varies from 1 to 2 on [0, 1]. This gives the simpole eqation prop = 1 + x This module contains some `eval` commands that can be a security threat. Rather than fix the problem I've hard coded in some 'if False' instances that you will have to hardcode back to 'if True' for them to work. """ from __future__ import division, print_function from math import pi import sympy from sympy import cos from sympy import sin import numpy #This is bad form to use global variables, but it works PTIB = [pi/2, 3*pi/2] PTPB = [pi, 2 * pi] MS = ['PTIB', 'PTPB'] A1 = 1 A2 = 2 def a_linear(x): """Linear distribution where [xt, yt] = [0,1], [xb, yb] = [1,0]""" return 1 + x def a_const(x): """Unity""" return 1 def two_dim(f, f2 = None, xc = 0.4, x = sympy.Symbol('x')): """ Evaluate string `f` then integrate between 0 and 1. Populate a 2x2 matrix with the definite integral of `f` between [0, 1] (or `f` between [0, `xc`] + `f2` between [`xc`, 1]). 
Requires MS = ['PTIB', 'PTPB'], and PTIB and PTPB global variables. Integrals at each element depend on values in PTIB and PTPB. As `f` and `f2` will be useed with eval(), all variables in the string must be defined elsewhere except mi and mj which will be defined at each matrix element location. Parameters ---------- f : ``str`` String to be evaluated and then integrated between [0,1] (or [0, `xc`] if f2 is not None). f2 : ``str``, optional String to be evaluated and then integrated between [`xc`, 1] (default is none, i.e. this will not contribute). xc : [0, 1], optional Break point on the left of which `f` will be integrated and on the right of which `f2` will be integrated. (Default xc=0.4). `x`: sympy.Symbol Integrateion varibale Default x=sympy.Symbol('x'). """ #global A1 #global A2 if False: for drainage in MS: A = [[0, 0], [0, 0]] for i, mi in enumerate(eval(drainage)): for j, mj in enumerate(eval(drainage)): if f2: A[i][j] = sympy.N(sympy.integrate(eval(f), (x, 0, xc)) + sympy.integrate(eval(f2), (x, xc, 1.0)), 8) else: A[i][j] = sympy.N(sympy.integrate(eval(f), (x, 0, 1)), 8) print(drainage) print('np.array(' + str(A) + ')') else: print('eval is disabled') def one_dim(f, f2 = None, xc = 0.4, x = sympy.Symbol('x')): """ Evaluate string `f` then integrate between 0 and 1. Populate a 1x2 matrix with the definite integral of `f` between [0, 1] (or `f` between [0, `xc`] + `f2` between [`xc`, 1]). Requires MS = ['PTIB', 'PTPB'], and PTIB and PTPB global variables. Integrals at each element depend on values in PTIB and PTPB. As `f` and `f2` will be useed with eval(), all variables in the string must be defined elsewhere except mi which will be defined at each matrix element location. Parameters ---------- f : ``str`` String to be evaluated and then integrated between [0,1] (or [0, `xc`] if f2 is not None). f2 : ``str``, optional String to be evaluated and then integrated between [`xc`, 1] (default is none, i.e. this will not contribute). 
xc : [0, 1], optional Break point on the left of which `f` will be integrated and on the right of which `f2` will be integrated. (Default xc=0.4). x : sympy.Symbol Integrateion varibale Default x=sympy.Symbol('x'). """ if True: for drainage in MS: A = [0, 0] for i, mi in enumerate(eval(drainage)): if f2 is not None: A[i] = sympy.N(sympy.integrate(eval(f), (x, 0, xc)) + sympy.integrate(eval(f2), (x, xc, 1.0)), 8) else: A[i] = sympy.N(sympy.integrate(eval(f), (x, 0, 1)), 8) print(drainage) print('np.array(' + str(A) + ')') else: print('eval is disabled') def run_cases(title, cases, fn): """Run a bunch of cases through fn - prints title. - runs and prints out each case. Parameters ---------- title : ``str`` Overall title to print out. cases : ``list`` of ``list`` List of cases e.g. [['case_name', ['arg1', 'arg2']]]. fn : function Function to be called with args from cases. """ print(title) for case, args in cases: print (case) fn(*args) def dim1sin_abf_linear(): """Print some test case data for geotecha.speccon.integrals.dim1sin_abf_linear See Also -------- geotecha.speccon.integrals.dim1sin_abf_linear : Full implementation of the function. geotecha.speccon.test.test_integrals.test_dim1sin_abf_linear : Data is used in testing. """ cases = [ ['a and b const', ['sin(mi*x)*a_const(x)*a_const(x)*sin(mj*x)']], ['a const in two layers, b const', ['sin(mi*x)*A1*a_const(x)*sin(mj*x)','sin(mi*x)*A2*a_const(x)*sin(mj*x)']], ['a linear in one layer, b const', ['sin(mi*x)*a_linear(x)*a_const(x)*sin(mj*x)']], ['a linear in one layer, b linear in one layer', ['sin(mi*x)*a_linear(x)*a_linear(x)*sin(mj*x)']], ] run_cases('dim1sin_abf_linear', cases, two_dim) def dim1sin_af_linear(): """Print some test case data for geotecha.speccon.integrals.dim1sin_af_linear See Also -------- geotecha.speccon.integrals.dim1sin_af_linear : Full implementation of the function. geotecha.speccon.test.test_integrals.test_dim1sin_af_linear : Data is used in testing. 
""" cases = [ ['a const', ['sin(mi*x)*a_const(x)*sin(mj*x)']], ['a const in two layers', ['sin(mi*x)*A1*sin(mj*x)','sin(mi*x)*A2*sin(mj*x)']], ['a linear in one layer', ['sin(mi*x)*a_linear(x)*sin(mj*x)']], ] run_cases('dim1sin_abf_linear', cases, two_dim) def dim1sin_D_aDf_linear(): """Print some test case data for geotecha.speccon.integrals.dim1sin_D_aDf_linear See Also -------- geotecha.speccon.integrals.dim1sin_D_aDf_linear : Full implementation of the function. geotecha.speccon.test.test_integrals.test_dim1sin_D_aDf_linear : Data is used in testing. """ cases = [ ['a const', ['sin(mi*x)*a_const(x)*sympy.diff(sin(mj*x),x,2)']], ['a const in two layers', ['sin(mi*x)*A1*sympy.diff(sin(mj*x),x,2) - A1*sympy.diff(sin(mi*x)*sympy.diff(sin(mj*x),x), x)','sin(mi*x)*A2*sympy.diff(sin(mj*x),x,2) - A2*sympy.diff(sin(mi*x)*sympy.diff(sin(mj*x),x), x)']], ['a linear in one layer', ['sin(mi*x)*a_linear(x)*sympy.diff(sin(mj*x),x,2) + sin(mi*x)*a_const(x)*sympy.diff(sin(mj*x),x)']], ['a linear within two layers', #a goes form [0,1] to [0.4, 1.4] and [0.4, 1] to [1, 1.6], i.e. slope 1 all the time ['sin(mi*x)*a_linear(x)*sympy.diff(sin(mj*x),x,2) + sin(mi*x)*a_const(x)*sympy.diff(sin(mj*x),x) - sympy.diff(a_linear(x)*sin(mi*x)*sympy.diff(sin(mj*x),x), x)', 'sin(mi*x)*(a_linear(x)-0.4)*sympy.diff(sin(mj*x),x,2) + sin(mi*x)*a_const(x)*sympy.diff(sin(mj*x),x) - sympy.diff((a_linear(x) - 0.4)*sin(mi*x)*sympy.diff(sin(mj*x),x), x)']], ] run_cases('dim1sin_D_aDf_linear', cases, two_dim) def dim1sin_ab_linear(): """Print some test case data for geotecha.speccon.integrals.dim1sin_ab_linear See Also -------- geotecha.speccon.integrals.dim1sin_ab_linear : Full implementation of the function. geotecha.speccon.test.test_integrals.test_dim1sin_ab_linear : Data is used in testing. 
""" cases = [ ['a const, b const', ['sin(mi*x)*a_const(x)*a_const(x)']], ['a const in two layers, b const', ['sin(mi*x)*A1','sin(mi*x)*A2']], ['a linear in one layer, b const', ['sin(mi*x)*a_linear(x)']], ['a linear in one layer, b linear in one layer', ['sin(mi*x)*a_linear(x)*a_linear(x)']], ] run_cases('dim1sin_ab_linear', cases, one_dim) def dim1sin_abc_linear(): """Print some test case data for geotecha.speccon.integrals.dim1sin_abc_linear See Also -------- geotecha.speccon.integrals.dim1sin_abc_linear : Full implementation of the function. geotecha.speccon.test.test_integrals.test_dim1sin_abc_linear : Data is used in testing. """ cases = [ ['a const, b const, c const', ['sin(mi*x)*a_const(x)']], ['a const in two layers, b const, c const', ['sin(mi*x)*A1','sin(mi*x)*A2']], ['a linear in one layer, b const, c const', ['sin(mi*x)*a_linear(x)']], ['a linear in one layer, b linear in one layer, c linear in one layer', ['sin(mi*x)*a_linear(x)*a_linear(x)*a_linear(x)']], ] run_cases('dim1sin_abc_linear', cases, one_dim) def dim1sin_D_aDb_linear(): """Print some test case data for geotecha.speccon.integrals.dim1sin_D_aDb_linear See Also -------- geotecha.speccon.integrals.dim1sin_D_aDb_linear : Full implementation of the function. geotecha.speccon.test.test_integrals.test_dim1sin_D_aDb_linear : Data is used in testing. """ cases = [] #the commented out cases below are incorrect owing to the inclusion of dirac integrations at the end points. the tests rely on data generated by hand. 
# cases = [ # ['a const, b const', # ['sin(mi*x)*sympy.diff(a_const(x)*sympy.diff(a_const(x),x),x)']], # ['a const, b linear in one layer', # ['sin(mi*x)*sympy.diff(a_const(x)*sympy.diff(a_linear(x),x),x)']], # ['a linear witin one layer, b linear accross both layers', # ['sin(mi*x)*sympy.diff(a_linear(x)*sympy.diff(a_linear(x),x),x)']], # ['a const witin two layer, b linear in one layers', # ['-sympy.diff(A1*sin(mi*x))', '-sympy.diff(A2*sin(mi*x))']],# a goes form [0,1] to [0.4, 1.4] and [0.4, 1] to [1, 1.6], i.e. slope 1 all the time # ['a linear within two layers, b linear accross both layers', # ['sin(mi*x)-sympy.diff(a_linear(x)*sin(mi*x),x)', 'sin(mi*x) - sympy.diff((a_linear(x)-xc)*sin(mi*x),x)']], # ['a const accross both layers, b linear within two layers', #a goes form [0,1] to [0.4, 1.4] and [0.4, 1] to [1, 2.2] i.e. slope 1 and then slope 2 # ['-sympy.diff(A1*sin(mi*x),x)', '-sympy.diff(A2*sin(mi*x),x)']], # ] run_cases('dim1sin_D_aDb_linear', cases, one_dim) def dim1sin_a_linear_between(): """Print some test case data for geotecha.speccon.integrals.dim1sin_a_linear_between See Also -------- geotecha.speccon.integrals.dim1sin_a_linear_between : Full implementation of the function. geotecha.speccon.test.test_integrals.test_dim1sin_a_linear_between : Data is used in testing. 
""" sympy.var('x, m') f1 = 1*sin(m*x)#between 0,0.4 f2 = 2*sin(m*x)#between 0.4, 0.6 f3 = 3*sin(m*x)#between 0.6, 1 f11 = (1+x)*sin(m*x)#between 0,0.4 f22 = (1+x-0.4)*sin(m*x)#between 0.4, 0.6 f33 = (1+x-0.6)*sin(m*x)#between 0.6, 1 cases = [ ['3 layers, a const = 1 betwn[0,0.4] 2 betw[0.4,0.6] 3 betw[0.6,1], z = [0.1, 0.3]', 'sympy.integrate(f1.subs(m,mi),(x,0.1,0.3))'], ['3 layers, a const = 1 betwn[0,0.4] 2 betw[0.4,0.6] 3 betw[0.6,1], z = [0, 0.4]', 'sympy.integrate(f1.subs(m,mi),(x,0,0.4))'], ['3 layers, a const = 1 betwn[0,0.4] 2 betw[0.4,0.6] 3 betw[0.6,1], z = [0.2, 0.5]', 'sympy.integrate(f1.subs(m,mi),(x,0.2,0.4))+sympy.integrate(f2.subs(m,mi),(x,0.4,0.5))'], ['3 layers, a const = 1 betwn[0,0.4] 2 betw[0.4,0.6] 3 betw[0.6,1], z = [0.2, 0.8]', 'sympy.integrate(f1.subs(m,mi),(x,0.2,0.4))+sympy.integrate(f2.subs(m,mi),(x,0.4,0.6))+sympy.integrate(f3.subs(m,mi),(x,0.6,0.8))'], ['3 layers, a linear = 1+x betwn[0,0.4] 0.6+x betw[0.4,0.6] 0.4+x betw[0.6,1], z = [0.2, 0.8]', 'sympy.integrate(f11.subs(m,mi),(x,0.2,0.4))+sympy.integrate(f22.subs(m,mi),(x,0.4,0.6))+sympy.integrate(f33.subs(m,mi),(x,0.6,0.8))'], ] for case in cases: for drainage in MS: A = [] for mi in eval(drainage): A.append(sympy.N( eval(case[1]) ) ) print("%s, %s" % (case[0], drainage)) print('np.array(' + str(A) + ')') print ('******') def main(): """Run all the test data generations""" dim1sin_af_linear() dim1sin_abf_linear() dim1sin_D_aDf_linear() dim1sin_abc_linear() dim1sin_D_aDb_linear() dim1sin_a_linear_between() if __name__ == '__main__': main()
rtrwalker/geotecha
geotecha/speccon/test/dim1sin_test_data_gen.py
Python
gpl-3.0
14,033
[ "DIRAC" ]
9b9724e1d02b858dc984315cad058f46ea993341784d17dc762c7d3f506b4c2f
'''
Created on 07.05.2017

@author: Mr. Jones
'''
'''receive values from a given node and add them to the given dictionary'''


class ExtractValueVisitor():
    """AST visitor that collects variable initializers.

    After traversal, ``values`` maps each declared identifier (the
    ``identifier.value`` of a ``VarDecl`` node) to its initializer node.
    Dispatch is by node class name: ``visit_<ClassName>`` if such a
    method exists, otherwise ``generic_visit``.
    """

    def __init__(self):
        self.optimization = 0  # optimization level flag; unused by this visitor
        self.values = {}       # identifier -> initializer node

    def visit(self, node, parent):
        """Dispatch *node* to its type-specific handler (or generic_visit)."""
        method = 'visit_%s' % node.__class__.__name__
        return getattr(self, method, self.generic_visit)(node, parent)

    def generic_visit(self, node, parent):
        """Default handler: recurse into every child of *node*."""
        for child in node:
            self.visit(child, parent)

    def visit_VarDecl(self, node, parent):
        """Record the initializer of a variable declaration.

        BUG FIX: ``visit`` always dispatches handlers with
        ``(node, parent)``, but this method previously accepted only
        ``node``, so every VarDecl raised a TypeError.  The ``parent``
        argument is accepted (and ignored) to match the dispatch contract.
        """
        identifier = node.identifier.value
        self.values[identifier] = node.initializer
mrj0n3s/jimplify
Jimplify/jimplify/visitors/extractvaluevisitor.py
Python
mit
674
[ "VisIt" ]
e9513bd9727381fe66117c1f074c5d0ec84e2ac8dd672c1aff211b00b4705abd
from sys import argv import matplotlib.pyplot as plt from ase.dft import STM from gpaw import restart filename = argv[1] z0 = 8 bias = 1.0 atoms, calc = restart(filename, txt=None) stm = STM(atoms, symmetries=[0, 1, 2]) c = stm.get_averaged_current(bias, z0) print 'Average current at z=%f: %f' % (z0, c) # Get 2d array of constant current heights: x, y, h = stm.scan(bias, c) print 'Min: %.2f Ang, Max: %.2f Ang' % (h.min(), h.max()) plt.contourf(x, y, h, 40) plt.hot() plt.colorbar() plt.show()
robwarm/gpaw-symm
doc/exercises/stm/stm.py
Python
gpl-3.0
508
[ "ASE", "GPAW" ]
21fffa7ac70c92d42c9c3460f9670650cae9b2f72eaa9583f0eea37242d86bf5
# # Copyright (C) 2009, Jose Antonio Martin H. # #http://rl-glue-ext.googlecode.com/ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # $Revision: 465 $ # $Date: 2009-01-28 19:55:32 -0700 (Wed, 28 Jan 2009) $ # $Author: xjamartinh $ # $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/utils/TaskSpecVRLGLUE3.py $ """ Brian Tanner: The license above is what matters most. I think you can all take the comments below as non-binding suggestions ;) This file was written by Jose Antonio Martin H. for the RL-Glue Extensions project. you are allowed to use it (and see it) fully but subject to the next conditions 1. to not cause damage to any person 2. to not use it to earn money except when you give me the 50% 3. to use it to produce a state of the art RL agent, if not, think a lot and then come back to write a super agent. This code is a 'parser' for the RL-Glue 3.0 TaskSpec. It does not make any duplication of information, that is, what you get is always a view of the original string. This is not the classic state-machine or automata approach to parsing languages so in particular you will se that the parser is robust to a big set of taskpec string malformations still getting the right information. blablabla Last modifed 22-1-2009 by Jose Antonio Martin H. Added enforced parsing error catching. 
""" import sys try: import psyco psyco.full() except ImportError: pass class TaskSpecParser: """ RL-Glue TaskSpec Sparser V3 """ # BEGIN: change made by: Akshay Narayan (06-01-2015:1200) #w = ["VERSION","PROBLEMTYPE","DISCOUNTFACTOR", "OBSERVATIONS","ACTIONS","REWARDS","EXTRA"] # w[0] = VERSION ... w[7] = NUMOFOBJECTIVES w = ["VERSION", "PROBLEMTYPE", "DISCOUNTFACTOR", "OBSERVATIONS", "ACTIONS", "REWARDS", "NUMOFOBJECTIVES", "EXTRA"] # END: change made by: Akshay Narayan (06-01-2015:1200) v = ["INTS","DOUBLES","CHARCOUNT"] expected_version = "RL-Glue-3.0" valid = True last_error = "" def __init__(self,ts): self.ts = ts if self.expected_version != self.getVersion(): print ("Warning: TaskSpec Version is not "+self.expected_version+" but "+self.getVersion()) self.valid = False while self.ts.find(" ")!=-1: self.ts = self.ts.replace(" "," ") def getVersion(self): a = len(self.w[0])+1 return self.ts[a:self.ts.find(" ",a)] def Validate(self): if not self.valid: print ("Warning: TaskSpec String is invalid: "+self.last_error) return False return True def getValue(self,i,ts,w): try: a = ts.index(w[i]) + len(w[i]) + 1 except: #ValueError: #raise AttributeError("Malformed TaskSpec String: could not find the "+w[i]+" keyword") self.last_error = "could not find the "+w[i]+" keyword" print ("Warning: Malformed TaskSpec String: " +self.last_error) self.valid = False return "" b=None if (i+1)<len(w): try: b = ts.index(w[i+1])-1 except: #ValueError: #raise AttributeError("Malformed TaskSpec String: could not find the "+w[i+1]+" keyword") self.last_error = "could not find the "+w[i+1]+" keyword" print ("Warning: Malformed TaskSpec String: " +self.last_error) self.valid = False return "" return ts[a:b].strip() def getProblemType(self): if not self.Validate(): return "" return self.getValue(1,self.ts,self.w) def getDiscountFactor(self): if not self.Validate(): return "" return float(self.getValue(2,self.ts,self.w)) def CompleteVars(self,str_in): if not self.Validate(): return "" """ 
forces the vars to have ints doubles and charcount """ if self.v[0] not in str_in: str_in = self.v[0]+" (0 0 0) " + str_in if self.v[2] not in str_in: str_in= str_in.rstrip()+" "+self.v[2]+" 0 " if self.v[1] not in str_in: i = str_in.find(self.v[2]) str_in= str_in[0:i]+self.v[1]+" (0 0 0) "+str_in[i:] return str_in def getObservations(self): if not self.Validate(): return "" str_o = self.getValue(3,self.ts,self.w) return self.CompleteVars(str_o) def getActions(self): if not self.Validate(): return "" str_a = self.getValue(4,self.ts,self.w) return self.CompleteVars(str_a) def getReward(self): if not self.Validate(): return "" return self.getValue(5,self.ts,self.w) # BEGIN: change made by: Akshay Narayan (06-01-2015:1209) def getNumOfObjectives(self): if not self.Validate(): return "" return int(self.getValue(6, self.ts, self.w)) # END: change made by: Akshay Narayan (06-01-2015:1209) def getExtra(self): if not self.Validate(): return "" # BEGIN: change made by: Akshay Narayan (06-01-2015:1237) #return self.getValue(6,self.ts,self.w) return self.getValue(7,self.ts,self.w) # END: change made by: Akshay Narayan (06-01-2015:1237) def isSpecial(self,maxOrMin): if type(maxOrMin)!=type(""): return False if maxOrMin=="UNSPEC" or maxOrMin=="NEGINF" or maxOrMin=="POSINF": return True; else: return False; def getRange(self,str_input): if not self.Validate(): return "" try: str_input = str_input.replace("UNSPEC","'UNSPEC'") str_input = str_input.replace("NEGINF","'NEGINF'") str_input = str_input.replace("POSINF","'POSINF'") str_input = str_input.replace(" ",",") r = eval(str_input) if len(r)==2: return [list(r)] out = r[0]*([[r[1],r[2]]]) return out except: self.last_error = "error ocurred while parsing a Range in "+str_input print ("Warning: Malformed TaskSpec String: " +self.last_error) print (sys.exc_info()) self.valid = False return "" def getRewardRange(self): if not self.Validate(): return "" str_reward = self.getReward() return self.getRange(str_reward) def 
getVarInfoRange(self,i,ts,w): self.Validate() a = ts.index(w[i]) b = ts.index(w[i+1])+1 return ts[a:b] def GetVarValue(self,i,str_o): if not self.Validate(): return "" str_r = self.getValue(i,str_o,self.v) str_r = str_r.replace(") (",")#(") # Ok I can parse it but this (there is no space or there is an extra space in ranges) # should be checked since this means that the taskspec is malformed str_r = str_r.replace("( ","(") str_r = str_r.replace(" )",")") str_r = str_r.replace(")(",")#(") parts = str_r.split("#") obs=[] for p in parts: obs.extend(self.getRange(p)) return obs def getIntObservations(self): if not self.Validate(): return "" return self.GetVarValue(0,self.getObservations()) def getDoubleObservations(self): if not self.Validate(): return "" return self.GetVarValue(1,self.getObservations()) def getCharCountObservations(self): if not self.Validate(): return "" str_o = self.getObservations() return int(self.getValue(2,str_o,self.v)) def getIntActions(self): if not self.Validate(): return "" return self.GetVarValue(0,self.getActions()) def getDoubleActions(self): if not self.Validate(): return "" return self.GetVarValue(1,self.getActions()) def getCharCountActions(self): if not self.Validate(): return "" str_a = self.getActions() return int(self.getValue(2,str_a,self.v)) def test(): # you can cut the taskspec by the main words with new line # BEGIN: change made by: Akshay Narayan (06-01-2015:1230) #ts ="""VERSION RL-Glue-3.0 PROBLEMTYPE episodic DISCOUNTFACTOR .7 OBSERVATIONS INTS (NEGINF 1) ( 2 -5 POSINF ) DOUBLES (2 -1.2 0.5 )(-.07 .07) (UNSPEC 3.3) (0 100.5) CHARCOUNT 32 # ACTIONS INTS (5 0 4) DOUBLES (-.5 2) (2 7.8 9) (NEGINF UNSPEC) REWARDS (-5.0 5.0) EXTRA some other stuff goes here""" ts ="""VERSION RL-Glue-3.0 PROBLEMTYPE episodic DISCOUNTFACTOR .7 OBSERVATIONS INTS (NEGINF 1) ( 2 -5 POSINF ) DOUBLES (2 -1.2 0.5 )(-.07 .07) (UNSPEC 3.3) (0 100.5) CHARCOUNT 32 ACTIONS INTS (5 0 4) DOUBLES (-.5 2) (2 7.8 9) (NEGINF UNSPEC) REWARDS (-5.0 5.0) 
NUMOFOBJECTIVES 3 EXTRA some other stuff goes here""" # END: change made by: Akshay Narayan (06-01-2015:1230) print (ts) print () print () TaskSpec = TaskSpecParser(ts) if TaskSpec.valid: print ("=======================================================================================================") print ("Version: ["+TaskSpec.getVersion()+"]") print ("ProblemType: ["+TaskSpec.getProblemType()+"]") print ("DiscountFactor: ["+str(TaskSpec.getDiscountFactor())+"]") print ("=======================================================================================================") # BEGIN: change made by: Akshay Narayan (06-01-2015:1228) print ("Number of objectives: ["+str(TaskSpec.getNumOfObjectives())+"]") # END: change made by: Akshay Narayan (06-01-2015:1228) print ("=======================================================================================================") print ("\t \t \t \t Observations") print ("=======================================================================================================") print ("Observations: ["+TaskSpec.getObservations()+"]") print ("Integers:",TaskSpec.getIntObservations()) print ("Doubles: ",TaskSpec.getDoubleObservations()) print ("Chars: ",TaskSpec.getCharCountObservations()) print ("=======================================================================================================") print ("\t \t \t \t Actions") print ("======================================================================================================") print ("Actions: ["+TaskSpec.getActions()+"]") print ("Integers:",TaskSpec.getIntActions()) print ("Doubles: ",TaskSpec.getDoubleActions()) print ("Chars: ",TaskSpec.getCharCountActions()) print ("=======================================================================================================") print ("Reward :["+TaskSpec.getReward()+"]") print ("Reward Range:",TaskSpec.getRewardRange()) print ("Extra: ["+TaskSpec.getExtra()+"]") print ("remember that by using len() you get the 
cardinality of lists!") print ("Thus:") print ("len(",TaskSpec.getDoubleObservations(),") ==> ",len(TaskSpec.getDoubleObservations())," Double Observations") print (TaskSpec.isSpecial("NEGINF")); if __name__=="__main__": test()
okkhoy/mo-rlglue-python-codec
rlglue/utils/TaskSpecVRLGLUE3.py
Python
mit
10,559
[ "Brian" ]
2342da887fbeba0a7acc951291505d5ccaf69cf4831f8fba4fff539daec0a732
"""Collection of functions to process mini batches.""" import numpy as np from sklearn.preprocessing import OneHotEncoder def invert_full_matrix_np(full_adjacency): full_adjacency = np.squeeze(full_adjacency) n_nodes = full_adjacency.shape[1] full_adjacency = np.append(np.zeros([1, n_nodes]), full_adjacency, axis=0) full_adjacency[0, 0] = 1 adjacency = np.eye(n_nodes) - np.linalg.inv(full_adjacency) return adjacency[1:, :] def batch_symmetrize_np(input_matrix, batch_size, n_nodes): """ Take an n_nodes - 1 x n_nodes matrix and symmetrizes it. It concatenates a row of zeros with the matrix, adds the transpose and then removes the padded row. Parameters ---------- input_matrix: theano tensor batch_size x n_nodes - 1 x n_nodes batch_size: int batch size n_nodes: int number of nodes of the matrix """ input_matrix = np.concatenate([np.zeros(shape=[batch_size, 1, n_nodes]), input_matrix], axis=1) result = np.zeros(shape=[batch_size, n_nodes, n_nodes]) for n in range(input_matrix.shape[0]): result[n, :, :] = np.squeeze(input_matrix[n, :, :]) + \ np.squeeze(input_matrix[n, :, :].T) return result[:, 1:, :] def full_matrix_np(adjacency, n_nodes): return np.linalg.inv(np.eye(n_nodes) - adjacency) def masked_softmax_full_np(input_data): batch_size = input_data.shape[0] n_nodes = input_data.shape[2] output_data = np.append(np.zeros([batch_size, 1, n_nodes]), input_data, axis=1) for i in range(batch_size): output_data[i, :, :] = \ full_matrix_np(np.squeeze(output_data[i, :, :]), n_nodes) return output_data[:, 1:, :] def features(X_parent, X_locations): """ Get the features of the dataset. Parameters ---------- X_parent: an array of size (batch_size x n_nodes - 1 x n_nodes) the adjacency of each matrix. X_locations: an array of size (batch_size x n_nodes - 1 x 3) the locations of each nodes. 
Returns ------- X_features: an array of size (batch_size x n_nodes x n_features) The features currently supports: - The adjacency - The full adjacency - locations - distance from immediate parents """ batch_size = X_parent.shape[0] n_nodes = X_parent.shape[2] X_adjacency = np.append(np.zeros([batch_size, 1, n_nodes]), X_parent, axis=1) X_locations = np.append(np.zeros([batch_size, 1, 3]), X_locations, axis=1) X_full_adjacency = np.zeros([batch_size, n_nodes, n_nodes]) X_distance = np.zeros([batch_size, n_nodes, 3]) for sample in range(batch_size): X_full_adjacency[sample, :, :] = \ full_matrix_np(np.squeeze(X_adjacency[sample, :, :]), n_nodes) X_distance[sample, :, :] = \ np.dot(np.eye(n_nodes) - np.squeeze(X_adjacency[sample, :, :]), np.squeeze(X_locations[sample, :, :])) X_features = np.append([X_adjacency, X_full_adjacency, X_locations, X_distance], axis=2) return X_features def get_batch(X_parent_cut, batch_size, n_nodes): """ Make a batch of morphological and geometrical data. Parameters ----------- training_data: dict of dicts each inner dict is an array 'geometry': 3-d arrays (locations) n_samples x n_nodes - 1 x 3 'morphology': 2-d arrays n_samples x n_nodes - 1 (parent sequences) example: training_data['geometry']['n20'][0:10, :, :] gives the geometry for the first 10 neurons training_data['geometry']['n20'][0:10, :] gives the parent sequences for the first 10 neurons here, 'n20' indexes a key corresponding to 20-node downsampled neurons. batch_size: int batch size. batch_counter: the index of the selected batches the data for batch are selected from the index (batch_counter - 1) * batch_size to batch_counter * batch_size of whole data. n_nodes: int subsampled resolution of the neurons. Returns ------- X_locations_real: an array of size (batch_size x n_nodes - 1 x 3) the location of the nodes of the neuorns. X_parent_real: an array of size (batch_size x n_nodes x n_nodes - 1) the parent sequence for parent of the neuron. 
""" enc = OneHotEncoder(n_values=n_nodes) X_parent_real = np.reshape(enc.fit_transform(X_parent_cut).toarray(), [batch_size, n_nodes - 1, n_nodes]) return X_parent_real def gen_batch(geom_model, morph_model, conditioning_rule='mgd', batch_size=64, n_nodes=20, input_dim=100): """ Generate a batch of samples from generators. Parameters ---------- geom_model: list of keras objects geometry generator morph_model: list of keras objects morphology generator conditioning_rule: str 'mgd': P_w(disc_loss|g,m) P(g|m) P(m) 'gmd': P_w(disc_loss|g,m) P(m|g) P(g) batch_size: int batch size n_nodes: list of ints number of nodes input_dim: int dimensionality of noise input Returns ------- locations: float (batch_size x 3 x n_nodes - 1) batch of generated locations parent: float (batch_size x n_nodes x n_nodes - 1) batch of generated morphology """ locations = None parent = None # Generate noise code noise_code = np.random.rand(batch_size, 1, input_dim) # Generate geometry and morphology if conditioning_rule == 'mgd': parent = morph_model.predict(noise_code) locations = cond_geom_model.predict([noise_code, parent]) elif conditioning_rule == 'gmd': locations = geom_model.predict(noise_code) parent = cond_morph_model.predict([noise_code, locations]) elif conditioning_rule == 'none': locations = geom_model.predict(noise_code) parent = morph_model.predict(noise_code) return locations, parent
tree-gan/BonsaiNet
batch_utils.py
Python
mit
6,481
[ "NEURON" ]
4ef342e179eb0ef1c437faedb2a0adb89a9012ac11ce17d6467271bfefdd1905
import cv2 import numpy import operator from logger import logger def watershed(image, grayed, edges, min_ratio, max_count): """ Applies watershed algorithm to 'image' with markers derived from 'edges' Args: image: original image grayed: grayed and optionally blurred version of 'image' edges: a binary image min_ratio: only contours in 'edges' with an area bigger are used as markers max_count: maximum number of segments to derive Returns segments, markers, count """ markers = edges.copy() _, markers1, _ = extract_segments( grayed, markers, min_ratio=min_ratio, max_count=max_count ) markers32 = numpy.int32(markers1) cv2.watershed(image, markers32) watersheded = cv2.convertScaleAbs(markers32) _, edges = cv2.threshold( watersheded, 1, 255, cv2.THRESH_BINARY_INV ) segments, markers, count = extract_segments( grayed, edges ) return segments, markers, count def canny(image, gaussian_ksize=(7, 7), threshold1=20, threshold2=100): """ Computes Gaussian blurred grayscale and Canny edges Args: image = image array; use cv2.imread(...) to load from file gaussian_ksize = filter size e.g. (5, 5) threshold1 = first threshold of the hysteresis procedure threshold2 = second threshold of the hysteresis procedure Returns (grayscale, edges) """ if len(image.shape) == 3: grayed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) elif len(image.shape) == 2: grayed = image else: raise Exception("Unsupported input image") blurred = cv2.GaussianBlur(grayed, gaussian_ksize, 0) edges = cv2.Canny(blurred, threshold1, threshold2) return (grayed, edges) def otsu(image, gaussian_ksize=(7, 7)): """ Computes Gaussian blurred grayscale and Otsu threshold edges Args: image = image array; use cv2.imread(...) to load from file gaussian_ksize = filter size e.g. 
(5, 5) Returns (grayscale, edges) """ grayed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) blurred = cv2.GaussianBlur(grayed, gaussian_ksize, 0) ret, edges = cv2.threshold( blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU ) return (grayed, edges) def seg_otsu_watershed(image, min_ratio=0.0001, max_count=100): grayed, edges = otsu(image) return watershed(image, grayed, edges, min_ratio, max_count) def seg_canny_watershed(image, min_ratio=0.0001, max_count=100): grayed, edges = canny(image) return watershed(image, grayed, edges, min_ratio, max_count) def seg_slic(image, min_ratio=0, max_count=100): from skimage.segmentation import slic from skimage.segmentation import mark_boundaries from skimage import img_as_ubyte grayed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) segments = slic(image, sigma=5) black = numpy.zeros(image.shape, numpy.uint8) edges_sci = mark_boundaries(black, segments, color=(1, 1, 1)) edges_gray = cv2.cvtColor(img_as_ubyte(edges_sci), cv2.COLOR_BGR2GRAY) ret, edges = cv2.threshold( edges_gray, 80, 255, cv2.THRESH_BINARY ) segments, markers, count = extract_segments( grayed, edges, min_ratio, max_count ) return segments, markers, count def extract_segments(grayed, edges, min_ratio=0, max_count=100): contours, hierarchy = cv2.findContours( edges, cv2.cv.CV_RETR_TREE, cv2.cv.CV_CHAIN_APPROX_SIMPLE ) markers = numpy.zeros(grayed.shape, numpy.uint8) total_area = edges.shape[0] * edges.shape[1] contours_total = len(contours) contours_no_child = [] for i in range(contours_total): h = hierarchy[0][i] if h[2] == -1: contours_no_child.append(contours[i]) else: logger.debug('Skipped child contour {i}'.format(i=i)) contours_filtered = [] for contour in contours_no_child: area = float(cv2.contourArea(contour)) ratio = area / total_area if ratio >= min_ratio: contours_filtered.append((contour, area)) else: logger.debug('Skipped contour with ratio %f' % ratio) contours_filtered_sorted = sorted( contours_filtered, key=operator.itemgetter(1), reverse=True ) 
assert(max_count < 255 - 2) contours_count = min(len(contours_filtered_sorted), max_count) contours_capped = contours_filtered_sorted[:contours_count] contours_no_tuple = [c for c, v in contours_capped] # color 1: border colors = range(2, 255) import random random.shuffle(colors) for i in range(contours_count): cv2.drawContours(markers, contours_no_tuple, i, colors[i], -1) logger.info( 'Segments found {total}, {count} satisfied min_ratio'.format( total=contours_total, count=contours_count )) segments = contours_no_tuple return segments, markers, contours_count
xiwenc/cbir-invenio
src/segmentation.py
Python
mit
5,133
[ "Gaussian" ]
1c5e83c375d9129f6f4fa9ed3308df4c0f215aeb414bcd73622fcd4ea0c185c8
from __future__ import print_function, division import os,unittest,numpy as np from pyscf.nao import gw as gw_c class KnowValues(unittest.TestCase): def test_rescf(self): """ reSCF then G0W0 """ fc = \ """-1.176137582599898090e+00 -6.697973984258517310e-01 -5.155143130039178123e-01 -4.365448724088398791e-01 2.104535161143837596e-01 2.985738190760626187e-01 5.383631831528181699e-01 5.960427511708059622e-01 6.298425248864513160e-01 6.702150570679562547e-01 7.488635881500678160e-01 1.030485556414411974e+00 1.133596236538136015e+00 1.308430815822860804e+00 1.322564760433334374e+00 1.444841711461231304e+00 1.831867938363858750e+00 1.902393397937107045e+00 1.977107479006525059e+00 2.119748779125555149e+00 2.150570967014801216e+00 2.899024682518652973e+00 3.912773887375614823e+00 """ dname = os.path.dirname(os.path.abspath(__file__)) gw = gw_c(label='water', cd=dname, verbosity=0, nocc=8, nvrt=6, nocc_conv=4, nvrt_conv=4, rescf=True, tol_ia=1e-6,) gw.kernel_gw() np.savetxt('eigvals_g0w0_pyscf_rescf_water_0061.txt', gw.mo_energy_gw[0,:,:].T) #gw.report() for e,eref_str in zip(gw.mo_energy_gw[0,0,:],fc.splitlines()): self.assertAlmostEqual(e,float(eref_str)) if __name__ == "__main__": unittest.main()
gkc1000/pyscf
pyscf/nao/test/test_0061_gw_rescf_g0w0_h2o.py
Python
apache-2.0
1,273
[ "PySCF" ]
7dd219c40469cf1b043a7d387bc7311d390411dd78d7610c408004bd579ec7b5
import sys # Include relative path for mimpy library. import mimpy.mesh.hexmeshwmsfracs as mesh import mimpy.mfd.mfd as mfd import numpy as np res_mfd = mfd.MFD() res_mfd.set_compute_diagonality(True) res_mfd.set_m_e_construction_method(0) #Define the permeability function def K(p, i, j, k): return np.eye(3) #set the mesh and an instance of the HexMesh class res_mesh = mesh.HexMeshWMSFracs() #The modification function is applied to the points of the mesh. #In this case no change is applied. def mod_function(p, i, j, k): return p frac_file = open("fracs.dat") frac_file.readline() frac_list = [] count = 0 for line in frac_file: line_split = line.split() new_frac = mesh.FracData() new_frac.azimuth = float(line_split[0])/180.*np.pi new_frac.dip = float(line_split[1])/180.*np.pi new_frac.a = float(line_split[2])/2. new_frac.b = float(line_split[3])/2. new_frac.id = count count +=1 frac_list.append(new_frac) new_frac.normal = new_frac.get_normal() point_x = float(line_split[4]) point_y = float(line_split[5]) point_z = float(line_split[6]) new_frac.center = np.array([point_x, point_y, point_z]) new_frac.generate_polygon(23) res_mesh.build_mesh(22, 22, 22, 300., 300., 300., K, mod_function) count = 1 fracture_faces_list = [] for frac in frac_list: frac.output_vtk("frac" + str(count)) count += 1 fracture_faces_list.append(res_mesh.add_fractures(frac)) count = 0 for key in res_mesh.fracture_faces_multi: fracture_faces = res_mesh.fracture_faces_multi[key] count += 1 res_mesh.output_vtk_faces("faces_"+str(count), list(fracture_faces)) res_mesh.build_frac_from_faces(list(fracture_faces)) res_mfd.set_mesh(res_mesh) res_mfd.apply_dirichlet_from_function(0, lambda x:0.) res_mfd.apply_dirichlet_from_function(1, lambda x:10.) res_mfd.apply_dirichlet_from_function(2, lambda x:0.) res_mfd.apply_dirichlet_from_function(3, lambda x:0.) res_mfd.apply_dirichlet_from_function(4, lambda x:0.) res_mfd.apply_dirichlet_from_function(5, lambda x:0.) #Build the LHS and RHS. 
res_mfd.build_lhs() res_mfd.build_rhs() #Solve the linear system. res_mfd.solve() #Output the solution in the vtk format. It will be saved in #the file "hexmes_example_1.vtk". res_mesh.output_vtk_mesh("hexmesh_example_1", [res_mfd.get_pressure_solution(), res_mesh.get_cell_domain_all()], ["MFDPressure", "DOMAIN"])
ohinai/mimpy
examples/fractures/multi_frac_w_input/hexmesh_example_1.py
Python
bsd-3-clause
2,579
[ "VTK" ]
bbde03567cfb35d8e078623c0f5ca6f648b7668a2c47fc013822c0734f431cff
# coding: utf-8 from __future__ import unicode_literals import binascii import collections import email import getpass import io import optparse import os import re import shlex import shutil import socket import struct import subprocess import sys import itertools import xml.etree.ElementTree try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse except ImportError: # Python 2 import urlparse as compat_urlparse try: import urllib.response as compat_urllib_response except ImportError: # Python 2 import urllib as compat_urllib_response try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: import http.cookies as compat_cookies except ImportError: # Python 2 import Cookie as compat_cookies try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: # Python >= 3.3 compat_html_entities_html5 = compat_html_entities.html5 except AttributeError: # Copied from CPython 3.5.1 html/entities.py compat_html_entities_html5 = { 'Aacute': '\xc1', 'aacute': '\xe1', 'Aacute;': '\xc1', 'aacute;': '\xe1', 'Abreve;': '\u0102', 'abreve;': '\u0103', 'ac;': '\u223e', 'acd;': '\u223f', 'acE;': '\u223e\u0333', 'Acirc': '\xc2', 'acirc': '\xe2', 'Acirc;': '\xc2', 'acirc;': '\xe2', 'acute': '\xb4', 'acute;': '\xb4', 'Acy;': '\u0410', 'acy;': '\u0430', 'AElig': '\xc6', 'aelig': '\xe6', 'AElig;': '\xc6', 'aelig;': '\xe6', 'af;': '\u2061', 'Afr;': '\U0001d504', 
'afr;': '\U0001d51e', 'Agrave': '\xc0', 'agrave': '\xe0', 'Agrave;': '\xc0', 'agrave;': '\xe0', 'alefsym;': '\u2135', 'aleph;': '\u2135', 'Alpha;': '\u0391', 'alpha;': '\u03b1', 'Amacr;': '\u0100', 'amacr;': '\u0101', 'amalg;': '\u2a3f', 'AMP': '&', 'amp': '&', 'AMP;': '&', 'amp;': '&', 'And;': '\u2a53', 'and;': '\u2227', 'andand;': '\u2a55', 'andd;': '\u2a5c', 'andslope;': '\u2a58', 'andv;': '\u2a5a', 'ang;': '\u2220', 'ange;': '\u29a4', 'angle;': '\u2220', 'angmsd;': '\u2221', 'angmsdaa;': '\u29a8', 'angmsdab;': '\u29a9', 'angmsdac;': '\u29aa', 'angmsdad;': '\u29ab', 'angmsdae;': '\u29ac', 'angmsdaf;': '\u29ad', 'angmsdag;': '\u29ae', 'angmsdah;': '\u29af', 'angrt;': '\u221f', 'angrtvb;': '\u22be', 'angrtvbd;': '\u299d', 'angsph;': '\u2222', 'angst;': '\xc5', 'angzarr;': '\u237c', 'Aogon;': '\u0104', 'aogon;': '\u0105', 'Aopf;': '\U0001d538', 'aopf;': '\U0001d552', 'ap;': '\u2248', 'apacir;': '\u2a6f', 'apE;': '\u2a70', 'ape;': '\u224a', 'apid;': '\u224b', 'apos;': "'", 'ApplyFunction;': '\u2061', 'approx;': '\u2248', 'approxeq;': '\u224a', 'Aring': '\xc5', 'aring': '\xe5', 'Aring;': '\xc5', 'aring;': '\xe5', 'Ascr;': '\U0001d49c', 'ascr;': '\U0001d4b6', 'Assign;': '\u2254', 'ast;': '*', 'asymp;': '\u2248', 'asympeq;': '\u224d', 'Atilde': '\xc3', 'atilde': '\xe3', 'Atilde;': '\xc3', 'atilde;': '\xe3', 'Auml': '\xc4', 'auml': '\xe4', 'Auml;': '\xc4', 'auml;': '\xe4', 'awconint;': '\u2233', 'awint;': '\u2a11', 'backcong;': '\u224c', 'backepsilon;': '\u03f6', 'backprime;': '\u2035', 'backsim;': '\u223d', 'backsimeq;': '\u22cd', 'Backslash;': '\u2216', 'Barv;': '\u2ae7', 'barvee;': '\u22bd', 'Barwed;': '\u2306', 'barwed;': '\u2305', 'barwedge;': '\u2305', 'bbrk;': '\u23b5', 'bbrktbrk;': '\u23b6', 'bcong;': '\u224c', 'Bcy;': '\u0411', 'bcy;': '\u0431', 'bdquo;': '\u201e', 'becaus;': '\u2235', 'Because;': '\u2235', 'because;': '\u2235', 'bemptyv;': '\u29b0', 'bepsi;': '\u03f6', 'bernou;': '\u212c', 'Bernoullis;': '\u212c', 'Beta;': '\u0392', 'beta;': '\u03b2', 'beth;': 
'\u2136', 'between;': '\u226c', 'Bfr;': '\U0001d505', 'bfr;': '\U0001d51f', 'bigcap;': '\u22c2', 'bigcirc;': '\u25ef', 'bigcup;': '\u22c3', 'bigodot;': '\u2a00', 'bigoplus;': '\u2a01', 'bigotimes;': '\u2a02', 'bigsqcup;': '\u2a06', 'bigstar;': '\u2605', 'bigtriangledown;': '\u25bd', 'bigtriangleup;': '\u25b3', 'biguplus;': '\u2a04', 'bigvee;': '\u22c1', 'bigwedge;': '\u22c0', 'bkarow;': '\u290d', 'blacklozenge;': '\u29eb', 'blacksquare;': '\u25aa', 'blacktriangle;': '\u25b4', 'blacktriangledown;': '\u25be', 'blacktriangleleft;': '\u25c2', 'blacktriangleright;': '\u25b8', 'blank;': '\u2423', 'blk12;': '\u2592', 'blk14;': '\u2591', 'blk34;': '\u2593', 'block;': '\u2588', 'bne;': '=\u20e5', 'bnequiv;': '\u2261\u20e5', 'bNot;': '\u2aed', 'bnot;': '\u2310', 'Bopf;': '\U0001d539', 'bopf;': '\U0001d553', 'bot;': '\u22a5', 'bottom;': '\u22a5', 'bowtie;': '\u22c8', 'boxbox;': '\u29c9', 'boxDL;': '\u2557', 'boxDl;': '\u2556', 'boxdL;': '\u2555', 'boxdl;': '\u2510', 'boxDR;': '\u2554', 'boxDr;': '\u2553', 'boxdR;': '\u2552', 'boxdr;': '\u250c', 'boxH;': '\u2550', 'boxh;': '\u2500', 'boxHD;': '\u2566', 'boxHd;': '\u2564', 'boxhD;': '\u2565', 'boxhd;': '\u252c', 'boxHU;': '\u2569', 'boxHu;': '\u2567', 'boxhU;': '\u2568', 'boxhu;': '\u2534', 'boxminus;': '\u229f', 'boxplus;': '\u229e', 'boxtimes;': '\u22a0', 'boxUL;': '\u255d', 'boxUl;': '\u255c', 'boxuL;': '\u255b', 'boxul;': '\u2518', 'boxUR;': '\u255a', 'boxUr;': '\u2559', 'boxuR;': '\u2558', 'boxur;': '\u2514', 'boxV;': '\u2551', 'boxv;': '\u2502', 'boxVH;': '\u256c', 'boxVh;': '\u256b', 'boxvH;': '\u256a', 'boxvh;': '\u253c', 'boxVL;': '\u2563', 'boxVl;': '\u2562', 'boxvL;': '\u2561', 'boxvl;': '\u2524', 'boxVR;': '\u2560', 'boxVr;': '\u255f', 'boxvR;': '\u255e', 'boxvr;': '\u251c', 'bprime;': '\u2035', 'Breve;': '\u02d8', 'breve;': '\u02d8', 'brvbar': '\xa6', 'brvbar;': '\xa6', 'Bscr;': '\u212c', 'bscr;': '\U0001d4b7', 'bsemi;': '\u204f', 'bsim;': '\u223d', 'bsime;': '\u22cd', 'bsol;': '\\', 'bsolb;': '\u29c5', 
'bsolhsub;': '\u27c8', 'bull;': '\u2022', 'bullet;': '\u2022', 'bump;': '\u224e', 'bumpE;': '\u2aae', 'bumpe;': '\u224f', 'Bumpeq;': '\u224e', 'bumpeq;': '\u224f', 'Cacute;': '\u0106', 'cacute;': '\u0107', 'Cap;': '\u22d2', 'cap;': '\u2229', 'capand;': '\u2a44', 'capbrcup;': '\u2a49', 'capcap;': '\u2a4b', 'capcup;': '\u2a47', 'capdot;': '\u2a40', 'CapitalDifferentialD;': '\u2145', 'caps;': '\u2229\ufe00', 'caret;': '\u2041', 'caron;': '\u02c7', 'Cayleys;': '\u212d', 'ccaps;': '\u2a4d', 'Ccaron;': '\u010c', 'ccaron;': '\u010d', 'Ccedil': '\xc7', 'ccedil': '\xe7', 'Ccedil;': '\xc7', 'ccedil;': '\xe7', 'Ccirc;': '\u0108', 'ccirc;': '\u0109', 'Cconint;': '\u2230', 'ccups;': '\u2a4c', 'ccupssm;': '\u2a50', 'Cdot;': '\u010a', 'cdot;': '\u010b', 'cedil': '\xb8', 'cedil;': '\xb8', 'Cedilla;': '\xb8', 'cemptyv;': '\u29b2', 'cent': '\xa2', 'cent;': '\xa2', 'CenterDot;': '\xb7', 'centerdot;': '\xb7', 'Cfr;': '\u212d', 'cfr;': '\U0001d520', 'CHcy;': '\u0427', 'chcy;': '\u0447', 'check;': '\u2713', 'checkmark;': '\u2713', 'Chi;': '\u03a7', 'chi;': '\u03c7', 'cir;': '\u25cb', 'circ;': '\u02c6', 'circeq;': '\u2257', 'circlearrowleft;': '\u21ba', 'circlearrowright;': '\u21bb', 'circledast;': '\u229b', 'circledcirc;': '\u229a', 'circleddash;': '\u229d', 'CircleDot;': '\u2299', 'circledR;': '\xae', 'circledS;': '\u24c8', 'CircleMinus;': '\u2296', 'CirclePlus;': '\u2295', 'CircleTimes;': '\u2297', 'cirE;': '\u29c3', 'cire;': '\u2257', 'cirfnint;': '\u2a10', 'cirmid;': '\u2aef', 'cirscir;': '\u29c2', 'ClockwiseContourIntegral;': '\u2232', 'CloseCurlyDoubleQuote;': '\u201d', 'CloseCurlyQuote;': '\u2019', 'clubs;': '\u2663', 'clubsuit;': '\u2663', 'Colon;': '\u2237', 'colon;': ':', 'Colone;': '\u2a74', 'colone;': '\u2254', 'coloneq;': '\u2254', 'comma;': ',', 'commat;': '@', 'comp;': '\u2201', 'compfn;': '\u2218', 'complement;': '\u2201', 'complexes;': '\u2102', 'cong;': '\u2245', 'congdot;': '\u2a6d', 'Congruent;': '\u2261', 'Conint;': '\u222f', 'conint;': '\u222e', 'ContourIntegral;': 
'\u222e', 'Copf;': '\u2102', 'copf;': '\U0001d554', 'coprod;': '\u2210', 'Coproduct;': '\u2210', 'COPY': '\xa9', 'copy': '\xa9', 'COPY;': '\xa9', 'copy;': '\xa9', 'copysr;': '\u2117', 'CounterClockwiseContourIntegral;': '\u2233', 'crarr;': '\u21b5', 'Cross;': '\u2a2f', 'cross;': '\u2717', 'Cscr;': '\U0001d49e', 'cscr;': '\U0001d4b8', 'csub;': '\u2acf', 'csube;': '\u2ad1', 'csup;': '\u2ad0', 'csupe;': '\u2ad2', 'ctdot;': '\u22ef', 'cudarrl;': '\u2938', 'cudarrr;': '\u2935', 'cuepr;': '\u22de', 'cuesc;': '\u22df', 'cularr;': '\u21b6', 'cularrp;': '\u293d', 'Cup;': '\u22d3', 'cup;': '\u222a', 'cupbrcap;': '\u2a48', 'CupCap;': '\u224d', 'cupcap;': '\u2a46', 'cupcup;': '\u2a4a', 'cupdot;': '\u228d', 'cupor;': '\u2a45', 'cups;': '\u222a\ufe00', 'curarr;': '\u21b7', 'curarrm;': '\u293c', 'curlyeqprec;': '\u22de', 'curlyeqsucc;': '\u22df', 'curlyvee;': '\u22ce', 'curlywedge;': '\u22cf', 'curren': '\xa4', 'curren;': '\xa4', 'curvearrowleft;': '\u21b6', 'curvearrowright;': '\u21b7', 'cuvee;': '\u22ce', 'cuwed;': '\u22cf', 'cwconint;': '\u2232', 'cwint;': '\u2231', 'cylcty;': '\u232d', 'Dagger;': '\u2021', 'dagger;': '\u2020', 'daleth;': '\u2138', 'Darr;': '\u21a1', 'dArr;': '\u21d3', 'darr;': '\u2193', 'dash;': '\u2010', 'Dashv;': '\u2ae4', 'dashv;': '\u22a3', 'dbkarow;': '\u290f', 'dblac;': '\u02dd', 'Dcaron;': '\u010e', 'dcaron;': '\u010f', 'Dcy;': '\u0414', 'dcy;': '\u0434', 'DD;': '\u2145', 'dd;': '\u2146', 'ddagger;': '\u2021', 'ddarr;': '\u21ca', 'DDotrahd;': '\u2911', 'ddotseq;': '\u2a77', 'deg': '\xb0', 'deg;': '\xb0', 'Del;': '\u2207', 'Delta;': '\u0394', 'delta;': '\u03b4', 'demptyv;': '\u29b1', 'dfisht;': '\u297f', 'Dfr;': '\U0001d507', 'dfr;': '\U0001d521', 'dHar;': '\u2965', 'dharl;': '\u21c3', 'dharr;': '\u21c2', 'DiacriticalAcute;': '\xb4', 'DiacriticalDot;': '\u02d9', 'DiacriticalDoubleAcute;': '\u02dd', 'DiacriticalGrave;': '`', 'DiacriticalTilde;': '\u02dc', 'diam;': '\u22c4', 'Diamond;': '\u22c4', 'diamond;': '\u22c4', 'diamondsuit;': '\u2666', 'diams;': 
'\u2666', 'die;': '\xa8', 'DifferentialD;': '\u2146', 'digamma;': '\u03dd', 'disin;': '\u22f2', 'div;': '\xf7', 'divide': '\xf7', 'divide;': '\xf7', 'divideontimes;': '\u22c7', 'divonx;': '\u22c7', 'DJcy;': '\u0402', 'djcy;': '\u0452', 'dlcorn;': '\u231e', 'dlcrop;': '\u230d', 'dollar;': '$', 'Dopf;': '\U0001d53b', 'dopf;': '\U0001d555', 'Dot;': '\xa8', 'dot;': '\u02d9', 'DotDot;': '\u20dc', 'doteq;': '\u2250', 'doteqdot;': '\u2251', 'DotEqual;': '\u2250', 'dotminus;': '\u2238', 'dotplus;': '\u2214', 'dotsquare;': '\u22a1', 'doublebarwedge;': '\u2306', 'DoubleContourIntegral;': '\u222f', 'DoubleDot;': '\xa8', 'DoubleDownArrow;': '\u21d3', 'DoubleLeftArrow;': '\u21d0', 'DoubleLeftRightArrow;': '\u21d4', 'DoubleLeftTee;': '\u2ae4', 'DoubleLongLeftArrow;': '\u27f8', 'DoubleLongLeftRightArrow;': '\u27fa', 'DoubleLongRightArrow;': '\u27f9', 'DoubleRightArrow;': '\u21d2', 'DoubleRightTee;': '\u22a8', 'DoubleUpArrow;': '\u21d1', 'DoubleUpDownArrow;': '\u21d5', 'DoubleVerticalBar;': '\u2225', 'DownArrow;': '\u2193', 'Downarrow;': '\u21d3', 'downarrow;': '\u2193', 'DownArrowBar;': '\u2913', 'DownArrowUpArrow;': '\u21f5', 'DownBreve;': '\u0311', 'downdownarrows;': '\u21ca', 'downharpoonleft;': '\u21c3', 'downharpoonright;': '\u21c2', 'DownLeftRightVector;': '\u2950', 'DownLeftTeeVector;': '\u295e', 'DownLeftVector;': '\u21bd', 'DownLeftVectorBar;': '\u2956', 'DownRightTeeVector;': '\u295f', 'DownRightVector;': '\u21c1', 'DownRightVectorBar;': '\u2957', 'DownTee;': '\u22a4', 'DownTeeArrow;': '\u21a7', 'drbkarow;': '\u2910', 'drcorn;': '\u231f', 'drcrop;': '\u230c', 'Dscr;': '\U0001d49f', 'dscr;': '\U0001d4b9', 'DScy;': '\u0405', 'dscy;': '\u0455', 'dsol;': '\u29f6', 'Dstrok;': '\u0110', 'dstrok;': '\u0111', 'dtdot;': '\u22f1', 'dtri;': '\u25bf', 'dtrif;': '\u25be', 'duarr;': '\u21f5', 'duhar;': '\u296f', 'dwangle;': '\u29a6', 'DZcy;': '\u040f', 'dzcy;': '\u045f', 'dzigrarr;': '\u27ff', 'Eacute': '\xc9', 'eacute': '\xe9', 'Eacute;': '\xc9', 'eacute;': '\xe9', 'easter;': 
'\u2a6e', 'Ecaron;': '\u011a', 'ecaron;': '\u011b', 'ecir;': '\u2256', 'Ecirc': '\xca', 'ecirc': '\xea', 'Ecirc;': '\xca', 'ecirc;': '\xea', 'ecolon;': '\u2255', 'Ecy;': '\u042d', 'ecy;': '\u044d', 'eDDot;': '\u2a77', 'Edot;': '\u0116', 'eDot;': '\u2251', 'edot;': '\u0117', 'ee;': '\u2147', 'efDot;': '\u2252', 'Efr;': '\U0001d508', 'efr;': '\U0001d522', 'eg;': '\u2a9a', 'Egrave': '\xc8', 'egrave': '\xe8', 'Egrave;': '\xc8', 'egrave;': '\xe8', 'egs;': '\u2a96', 'egsdot;': '\u2a98', 'el;': '\u2a99', 'Element;': '\u2208', 'elinters;': '\u23e7', 'ell;': '\u2113', 'els;': '\u2a95', 'elsdot;': '\u2a97', 'Emacr;': '\u0112', 'emacr;': '\u0113', 'empty;': '\u2205', 'emptyset;': '\u2205', 'EmptySmallSquare;': '\u25fb', 'emptyv;': '\u2205', 'EmptyVerySmallSquare;': '\u25ab', 'emsp13;': '\u2004', 'emsp14;': '\u2005', 'emsp;': '\u2003', 'ENG;': '\u014a', 'eng;': '\u014b', 'ensp;': '\u2002', 'Eogon;': '\u0118', 'eogon;': '\u0119', 'Eopf;': '\U0001d53c', 'eopf;': '\U0001d556', 'epar;': '\u22d5', 'eparsl;': '\u29e3', 'eplus;': '\u2a71', 'epsi;': '\u03b5', 'Epsilon;': '\u0395', 'epsilon;': '\u03b5', 'epsiv;': '\u03f5', 'eqcirc;': '\u2256', 'eqcolon;': '\u2255', 'eqsim;': '\u2242', 'eqslantgtr;': '\u2a96', 'eqslantless;': '\u2a95', 'Equal;': '\u2a75', 'equals;': '=', 'EqualTilde;': '\u2242', 'equest;': '\u225f', 'Equilibrium;': '\u21cc', 'equiv;': '\u2261', 'equivDD;': '\u2a78', 'eqvparsl;': '\u29e5', 'erarr;': '\u2971', 'erDot;': '\u2253', 'Escr;': '\u2130', 'escr;': '\u212f', 'esdot;': '\u2250', 'Esim;': '\u2a73', 'esim;': '\u2242', 'Eta;': '\u0397', 'eta;': '\u03b7', 'ETH': '\xd0', 'eth': '\xf0', 'ETH;': '\xd0', 'eth;': '\xf0', 'Euml': '\xcb', 'euml': '\xeb', 'Euml;': '\xcb', 'euml;': '\xeb', 'euro;': '\u20ac', 'excl;': '!', 'exist;': '\u2203', 'Exists;': '\u2203', 'expectation;': '\u2130', 'ExponentialE;': '\u2147', 'exponentiale;': '\u2147', 'fallingdotseq;': '\u2252', 'Fcy;': '\u0424', 'fcy;': '\u0444', 'female;': '\u2640', 'ffilig;': '\ufb03', 'fflig;': '\ufb00', 'ffllig;': 
'\ufb04', 'Ffr;': '\U0001d509', 'ffr;': '\U0001d523', 'filig;': '\ufb01', 'FilledSmallSquare;': '\u25fc', 'FilledVerySmallSquare;': '\u25aa', 'fjlig;': 'fj', 'flat;': '\u266d', 'fllig;': '\ufb02', 'fltns;': '\u25b1', 'fnof;': '\u0192', 'Fopf;': '\U0001d53d', 'fopf;': '\U0001d557', 'ForAll;': '\u2200', 'forall;': '\u2200', 'fork;': '\u22d4', 'forkv;': '\u2ad9', 'Fouriertrf;': '\u2131', 'fpartint;': '\u2a0d', 'frac12': '\xbd', 'frac12;': '\xbd', 'frac13;': '\u2153', 'frac14': '\xbc', 'frac14;': '\xbc', 'frac15;': '\u2155', 'frac16;': '\u2159', 'frac18;': '\u215b', 'frac23;': '\u2154', 'frac25;': '\u2156', 'frac34': '\xbe', 'frac34;': '\xbe', 'frac35;': '\u2157', 'frac38;': '\u215c', 'frac45;': '\u2158', 'frac56;': '\u215a', 'frac58;': '\u215d', 'frac78;': '\u215e', 'frasl;': '\u2044', 'frown;': '\u2322', 'Fscr;': '\u2131', 'fscr;': '\U0001d4bb', 'gacute;': '\u01f5', 'Gamma;': '\u0393', 'gamma;': '\u03b3', 'Gammad;': '\u03dc', 'gammad;': '\u03dd', 'gap;': '\u2a86', 'Gbreve;': '\u011e', 'gbreve;': '\u011f', 'Gcedil;': '\u0122', 'Gcirc;': '\u011c', 'gcirc;': '\u011d', 'Gcy;': '\u0413', 'gcy;': '\u0433', 'Gdot;': '\u0120', 'gdot;': '\u0121', 'gE;': '\u2267', 'ge;': '\u2265', 'gEl;': '\u2a8c', 'gel;': '\u22db', 'geq;': '\u2265', 'geqq;': '\u2267', 'geqslant;': '\u2a7e', 'ges;': '\u2a7e', 'gescc;': '\u2aa9', 'gesdot;': '\u2a80', 'gesdoto;': '\u2a82', 'gesdotol;': '\u2a84', 'gesl;': '\u22db\ufe00', 'gesles;': '\u2a94', 'Gfr;': '\U0001d50a', 'gfr;': '\U0001d524', 'Gg;': '\u22d9', 'gg;': '\u226b', 'ggg;': '\u22d9', 'gimel;': '\u2137', 'GJcy;': '\u0403', 'gjcy;': '\u0453', 'gl;': '\u2277', 'gla;': '\u2aa5', 'glE;': '\u2a92', 'glj;': '\u2aa4', 'gnap;': '\u2a8a', 'gnapprox;': '\u2a8a', 'gnE;': '\u2269', 'gne;': '\u2a88', 'gneq;': '\u2a88', 'gneqq;': '\u2269', 'gnsim;': '\u22e7', 'Gopf;': '\U0001d53e', 'gopf;': '\U0001d558', 'grave;': '`', 'GreaterEqual;': '\u2265', 'GreaterEqualLess;': '\u22db', 'GreaterFullEqual;': '\u2267', 'GreaterGreater;': '\u2aa2', 'GreaterLess;': 
'\u2277', 'GreaterSlantEqual;': '\u2a7e', 'GreaterTilde;': '\u2273', 'Gscr;': '\U0001d4a2', 'gscr;': '\u210a', 'gsim;': '\u2273', 'gsime;': '\u2a8e', 'gsiml;': '\u2a90', 'GT': '>', 'gt': '>', 'GT;': '>', 'Gt;': '\u226b', 'gt;': '>', 'gtcc;': '\u2aa7', 'gtcir;': '\u2a7a', 'gtdot;': '\u22d7', 'gtlPar;': '\u2995', 'gtquest;': '\u2a7c', 'gtrapprox;': '\u2a86', 'gtrarr;': '\u2978', 'gtrdot;': '\u22d7', 'gtreqless;': '\u22db', 'gtreqqless;': '\u2a8c', 'gtrless;': '\u2277', 'gtrsim;': '\u2273', 'gvertneqq;': '\u2269\ufe00', 'gvnE;': '\u2269\ufe00', 'Hacek;': '\u02c7', 'hairsp;': '\u200a', 'half;': '\xbd', 'hamilt;': '\u210b', 'HARDcy;': '\u042a', 'hardcy;': '\u044a', 'hArr;': '\u21d4', 'harr;': '\u2194', 'harrcir;': '\u2948', 'harrw;': '\u21ad', 'Hat;': '^', 'hbar;': '\u210f', 'Hcirc;': '\u0124', 'hcirc;': '\u0125', 'hearts;': '\u2665', 'heartsuit;': '\u2665', 'hellip;': '\u2026', 'hercon;': '\u22b9', 'Hfr;': '\u210c', 'hfr;': '\U0001d525', 'HilbertSpace;': '\u210b', 'hksearow;': '\u2925', 'hkswarow;': '\u2926', 'hoarr;': '\u21ff', 'homtht;': '\u223b', 'hookleftarrow;': '\u21a9', 'hookrightarrow;': '\u21aa', 'Hopf;': '\u210d', 'hopf;': '\U0001d559', 'horbar;': '\u2015', 'HorizontalLine;': '\u2500', 'Hscr;': '\u210b', 'hscr;': '\U0001d4bd', 'hslash;': '\u210f', 'Hstrok;': '\u0126', 'hstrok;': '\u0127', 'HumpDownHump;': '\u224e', 'HumpEqual;': '\u224f', 'hybull;': '\u2043', 'hyphen;': '\u2010', 'Iacute': '\xcd', 'iacute': '\xed', 'Iacute;': '\xcd', 'iacute;': '\xed', 'ic;': '\u2063', 'Icirc': '\xce', 'icirc': '\xee', 'Icirc;': '\xce', 'icirc;': '\xee', 'Icy;': '\u0418', 'icy;': '\u0438', 'Idot;': '\u0130', 'IEcy;': '\u0415', 'iecy;': '\u0435', 'iexcl': '\xa1', 'iexcl;': '\xa1', 'iff;': '\u21d4', 'Ifr;': '\u2111', 'ifr;': '\U0001d526', 'Igrave': '\xcc', 'igrave': '\xec', 'Igrave;': '\xcc', 'igrave;': '\xec', 'ii;': '\u2148', 'iiiint;': '\u2a0c', 'iiint;': '\u222d', 'iinfin;': '\u29dc', 'iiota;': '\u2129', 'IJlig;': '\u0132', 'ijlig;': '\u0133', 'Im;': '\u2111', 'Imacr;': 
'\u012a', 'imacr;': '\u012b', 'image;': '\u2111', 'ImaginaryI;': '\u2148', 'imagline;': '\u2110', 'imagpart;': '\u2111', 'imath;': '\u0131', 'imof;': '\u22b7', 'imped;': '\u01b5', 'Implies;': '\u21d2', 'in;': '\u2208', 'incare;': '\u2105', 'infin;': '\u221e', 'infintie;': '\u29dd', 'inodot;': '\u0131', 'Int;': '\u222c', 'int;': '\u222b', 'intcal;': '\u22ba', 'integers;': '\u2124', 'Integral;': '\u222b', 'intercal;': '\u22ba', 'Intersection;': '\u22c2', 'intlarhk;': '\u2a17', 'intprod;': '\u2a3c', 'InvisibleComma;': '\u2063', 'InvisibleTimes;': '\u2062', 'IOcy;': '\u0401', 'iocy;': '\u0451', 'Iogon;': '\u012e', 'iogon;': '\u012f', 'Iopf;': '\U0001d540', 'iopf;': '\U0001d55a', 'Iota;': '\u0399', 'iota;': '\u03b9', 'iprod;': '\u2a3c', 'iquest': '\xbf', 'iquest;': '\xbf', 'Iscr;': '\u2110', 'iscr;': '\U0001d4be', 'isin;': '\u2208', 'isindot;': '\u22f5', 'isinE;': '\u22f9', 'isins;': '\u22f4', 'isinsv;': '\u22f3', 'isinv;': '\u2208', 'it;': '\u2062', 'Itilde;': '\u0128', 'itilde;': '\u0129', 'Iukcy;': '\u0406', 'iukcy;': '\u0456', 'Iuml': '\xcf', 'iuml': '\xef', 'Iuml;': '\xcf', 'iuml;': '\xef', 'Jcirc;': '\u0134', 'jcirc;': '\u0135', 'Jcy;': '\u0419', 'jcy;': '\u0439', 'Jfr;': '\U0001d50d', 'jfr;': '\U0001d527', 'jmath;': '\u0237', 'Jopf;': '\U0001d541', 'jopf;': '\U0001d55b', 'Jscr;': '\U0001d4a5', 'jscr;': '\U0001d4bf', 'Jsercy;': '\u0408', 'jsercy;': '\u0458', 'Jukcy;': '\u0404', 'jukcy;': '\u0454', 'Kappa;': '\u039a', 'kappa;': '\u03ba', 'kappav;': '\u03f0', 'Kcedil;': '\u0136', 'kcedil;': '\u0137', 'Kcy;': '\u041a', 'kcy;': '\u043a', 'Kfr;': '\U0001d50e', 'kfr;': '\U0001d528', 'kgreen;': '\u0138', 'KHcy;': '\u0425', 'khcy;': '\u0445', 'KJcy;': '\u040c', 'kjcy;': '\u045c', 'Kopf;': '\U0001d542', 'kopf;': '\U0001d55c', 'Kscr;': '\U0001d4a6', 'kscr;': '\U0001d4c0', 'lAarr;': '\u21da', 'Lacute;': '\u0139', 'lacute;': '\u013a', 'laemptyv;': '\u29b4', 'lagran;': '\u2112', 'Lambda;': '\u039b', 'lambda;': '\u03bb', 'Lang;': '\u27ea', 'lang;': '\u27e8', 'langd;': '\u2991', 
'langle;': '\u27e8', 'lap;': '\u2a85', 'Laplacetrf;': '\u2112', 'laquo': '\xab', 'laquo;': '\xab', 'Larr;': '\u219e', 'lArr;': '\u21d0', 'larr;': '\u2190', 'larrb;': '\u21e4', 'larrbfs;': '\u291f', 'larrfs;': '\u291d', 'larrhk;': '\u21a9', 'larrlp;': '\u21ab', 'larrpl;': '\u2939', 'larrsim;': '\u2973', 'larrtl;': '\u21a2', 'lat;': '\u2aab', 'lAtail;': '\u291b', 'latail;': '\u2919', 'late;': '\u2aad', 'lates;': '\u2aad\ufe00', 'lBarr;': '\u290e', 'lbarr;': '\u290c', 'lbbrk;': '\u2772', 'lbrace;': '{', 'lbrack;': '[', 'lbrke;': '\u298b', 'lbrksld;': '\u298f', 'lbrkslu;': '\u298d', 'Lcaron;': '\u013d', 'lcaron;': '\u013e', 'Lcedil;': '\u013b', 'lcedil;': '\u013c', 'lceil;': '\u2308', 'lcub;': '{', 'Lcy;': '\u041b', 'lcy;': '\u043b', 'ldca;': '\u2936', 'ldquo;': '\u201c', 'ldquor;': '\u201e', 'ldrdhar;': '\u2967', 'ldrushar;': '\u294b', 'ldsh;': '\u21b2', 'lE;': '\u2266', 'le;': '\u2264', 'LeftAngleBracket;': '\u27e8', 'LeftArrow;': '\u2190', 'Leftarrow;': '\u21d0', 'leftarrow;': '\u2190', 'LeftArrowBar;': '\u21e4', 'LeftArrowRightArrow;': '\u21c6', 'leftarrowtail;': '\u21a2', 'LeftCeiling;': '\u2308', 'LeftDoubleBracket;': '\u27e6', 'LeftDownTeeVector;': '\u2961', 'LeftDownVector;': '\u21c3', 'LeftDownVectorBar;': '\u2959', 'LeftFloor;': '\u230a', 'leftharpoondown;': '\u21bd', 'leftharpoonup;': '\u21bc', 'leftleftarrows;': '\u21c7', 'LeftRightArrow;': '\u2194', 'Leftrightarrow;': '\u21d4', 'leftrightarrow;': '\u2194', 'leftrightarrows;': '\u21c6', 'leftrightharpoons;': '\u21cb', 'leftrightsquigarrow;': '\u21ad', 'LeftRightVector;': '\u294e', 'LeftTee;': '\u22a3', 'LeftTeeArrow;': '\u21a4', 'LeftTeeVector;': '\u295a', 'leftthreetimes;': '\u22cb', 'LeftTriangle;': '\u22b2', 'LeftTriangleBar;': '\u29cf', 'LeftTriangleEqual;': '\u22b4', 'LeftUpDownVector;': '\u2951', 'LeftUpTeeVector;': '\u2960', 'LeftUpVector;': '\u21bf', 'LeftUpVectorBar;': '\u2958', 'LeftVector;': '\u21bc', 'LeftVectorBar;': '\u2952', 'lEg;': '\u2a8b', 'leg;': '\u22da', 'leq;': '\u2264', 'leqq;': 
'\u2266', 'leqslant;': '\u2a7d', 'les;': '\u2a7d', 'lescc;': '\u2aa8', 'lesdot;': '\u2a7f', 'lesdoto;': '\u2a81', 'lesdotor;': '\u2a83', 'lesg;': '\u22da\ufe00', 'lesges;': '\u2a93', 'lessapprox;': '\u2a85', 'lessdot;': '\u22d6', 'lesseqgtr;': '\u22da', 'lesseqqgtr;': '\u2a8b', 'LessEqualGreater;': '\u22da', 'LessFullEqual;': '\u2266', 'LessGreater;': '\u2276', 'lessgtr;': '\u2276', 'LessLess;': '\u2aa1', 'lesssim;': '\u2272', 'LessSlantEqual;': '\u2a7d', 'LessTilde;': '\u2272', 'lfisht;': '\u297c', 'lfloor;': '\u230a', 'Lfr;': '\U0001d50f', 'lfr;': '\U0001d529', 'lg;': '\u2276', 'lgE;': '\u2a91', 'lHar;': '\u2962', 'lhard;': '\u21bd', 'lharu;': '\u21bc', 'lharul;': '\u296a', 'lhblk;': '\u2584', 'LJcy;': '\u0409', 'ljcy;': '\u0459', 'Ll;': '\u22d8', 'll;': '\u226a', 'llarr;': '\u21c7', 'llcorner;': '\u231e', 'Lleftarrow;': '\u21da', 'llhard;': '\u296b', 'lltri;': '\u25fa', 'Lmidot;': '\u013f', 'lmidot;': '\u0140', 'lmoust;': '\u23b0', 'lmoustache;': '\u23b0', 'lnap;': '\u2a89', 'lnapprox;': '\u2a89', 'lnE;': '\u2268', 'lne;': '\u2a87', 'lneq;': '\u2a87', 'lneqq;': '\u2268', 'lnsim;': '\u22e6', 'loang;': '\u27ec', 'loarr;': '\u21fd', 'lobrk;': '\u27e6', 'LongLeftArrow;': '\u27f5', 'Longleftarrow;': '\u27f8', 'longleftarrow;': '\u27f5', 'LongLeftRightArrow;': '\u27f7', 'Longleftrightarrow;': '\u27fa', 'longleftrightarrow;': '\u27f7', 'longmapsto;': '\u27fc', 'LongRightArrow;': '\u27f6', 'Longrightarrow;': '\u27f9', 'longrightarrow;': '\u27f6', 'looparrowleft;': '\u21ab', 'looparrowright;': '\u21ac', 'lopar;': '\u2985', 'Lopf;': '\U0001d543', 'lopf;': '\U0001d55d', 'loplus;': '\u2a2d', 'lotimes;': '\u2a34', 'lowast;': '\u2217', 'lowbar;': '_', 'LowerLeftArrow;': '\u2199', 'LowerRightArrow;': '\u2198', 'loz;': '\u25ca', 'lozenge;': '\u25ca', 'lozf;': '\u29eb', 'lpar;': '(', 'lparlt;': '\u2993', 'lrarr;': '\u21c6', 'lrcorner;': '\u231f', 'lrhar;': '\u21cb', 'lrhard;': '\u296d', 'lrm;': '\u200e', 'lrtri;': '\u22bf', 'lsaquo;': '\u2039', 'Lscr;': '\u2112', 'lscr;': 
'\U0001d4c1', 'Lsh;': '\u21b0', 'lsh;': '\u21b0', 'lsim;': '\u2272', 'lsime;': '\u2a8d', 'lsimg;': '\u2a8f', 'lsqb;': '[', 'lsquo;': '\u2018', 'lsquor;': '\u201a', 'Lstrok;': '\u0141', 'lstrok;': '\u0142', 'LT': '<', 'lt': '<', 'LT;': '<', 'Lt;': '\u226a', 'lt;': '<', 'ltcc;': '\u2aa6', 'ltcir;': '\u2a79', 'ltdot;': '\u22d6', 'lthree;': '\u22cb', 'ltimes;': '\u22c9', 'ltlarr;': '\u2976', 'ltquest;': '\u2a7b', 'ltri;': '\u25c3', 'ltrie;': '\u22b4', 'ltrif;': '\u25c2', 'ltrPar;': '\u2996', 'lurdshar;': '\u294a', 'luruhar;': '\u2966', 'lvertneqq;': '\u2268\ufe00', 'lvnE;': '\u2268\ufe00', 'macr': '\xaf', 'macr;': '\xaf', 'male;': '\u2642', 'malt;': '\u2720', 'maltese;': '\u2720', 'Map;': '\u2905', 'map;': '\u21a6', 'mapsto;': '\u21a6', 'mapstodown;': '\u21a7', 'mapstoleft;': '\u21a4', 'mapstoup;': '\u21a5', 'marker;': '\u25ae', 'mcomma;': '\u2a29', 'Mcy;': '\u041c', 'mcy;': '\u043c', 'mdash;': '\u2014', 'mDDot;': '\u223a', 'measuredangle;': '\u2221', 'MediumSpace;': '\u205f', 'Mellintrf;': '\u2133', 'Mfr;': '\U0001d510', 'mfr;': '\U0001d52a', 'mho;': '\u2127', 'micro': '\xb5', 'micro;': '\xb5', 'mid;': '\u2223', 'midast;': '*', 'midcir;': '\u2af0', 'middot': '\xb7', 'middot;': '\xb7', 'minus;': '\u2212', 'minusb;': '\u229f', 'minusd;': '\u2238', 'minusdu;': '\u2a2a', 'MinusPlus;': '\u2213', 'mlcp;': '\u2adb', 'mldr;': '\u2026', 'mnplus;': '\u2213', 'models;': '\u22a7', 'Mopf;': '\U0001d544', 'mopf;': '\U0001d55e', 'mp;': '\u2213', 'Mscr;': '\u2133', 'mscr;': '\U0001d4c2', 'mstpos;': '\u223e', 'Mu;': '\u039c', 'mu;': '\u03bc', 'multimap;': '\u22b8', 'mumap;': '\u22b8', 'nabla;': '\u2207', 'Nacute;': '\u0143', 'nacute;': '\u0144', 'nang;': '\u2220\u20d2', 'nap;': '\u2249', 'napE;': '\u2a70\u0338', 'napid;': '\u224b\u0338', 'napos;': '\u0149', 'napprox;': '\u2249', 'natur;': '\u266e', 'natural;': '\u266e', 'naturals;': '\u2115', 'nbsp': '\xa0', 'nbsp;': '\xa0', 'nbump;': '\u224e\u0338', 'nbumpe;': '\u224f\u0338', 'ncap;': '\u2a43', 'Ncaron;': '\u0147', 'ncaron;': 
'\u0148', 'Ncedil;': '\u0145', 'ncedil;': '\u0146', 'ncong;': '\u2247', 'ncongdot;': '\u2a6d\u0338', 'ncup;': '\u2a42', 'Ncy;': '\u041d', 'ncy;': '\u043d', 'ndash;': '\u2013', 'ne;': '\u2260', 'nearhk;': '\u2924', 'neArr;': '\u21d7', 'nearr;': '\u2197', 'nearrow;': '\u2197', 'nedot;': '\u2250\u0338', 'NegativeMediumSpace;': '\u200b', 'NegativeThickSpace;': '\u200b', 'NegativeThinSpace;': '\u200b', 'NegativeVeryThinSpace;': '\u200b', 'nequiv;': '\u2262', 'nesear;': '\u2928', 'nesim;': '\u2242\u0338', 'NestedGreaterGreater;': '\u226b', 'NestedLessLess;': '\u226a', 'NewLine;': '\n', 'nexist;': '\u2204', 'nexists;': '\u2204', 'Nfr;': '\U0001d511', 'nfr;': '\U0001d52b', 'ngE;': '\u2267\u0338', 'nge;': '\u2271', 'ngeq;': '\u2271', 'ngeqq;': '\u2267\u0338', 'ngeqslant;': '\u2a7e\u0338', 'nges;': '\u2a7e\u0338', 'nGg;': '\u22d9\u0338', 'ngsim;': '\u2275', 'nGt;': '\u226b\u20d2', 'ngt;': '\u226f', 'ngtr;': '\u226f', 'nGtv;': '\u226b\u0338', 'nhArr;': '\u21ce', 'nharr;': '\u21ae', 'nhpar;': '\u2af2', 'ni;': '\u220b', 'nis;': '\u22fc', 'nisd;': '\u22fa', 'niv;': '\u220b', 'NJcy;': '\u040a', 'njcy;': '\u045a', 'nlArr;': '\u21cd', 'nlarr;': '\u219a', 'nldr;': '\u2025', 'nlE;': '\u2266\u0338', 'nle;': '\u2270', 'nLeftarrow;': '\u21cd', 'nleftarrow;': '\u219a', 'nLeftrightarrow;': '\u21ce', 'nleftrightarrow;': '\u21ae', 'nleq;': '\u2270', 'nleqq;': '\u2266\u0338', 'nleqslant;': '\u2a7d\u0338', 'nles;': '\u2a7d\u0338', 'nless;': '\u226e', 'nLl;': '\u22d8\u0338', 'nlsim;': '\u2274', 'nLt;': '\u226a\u20d2', 'nlt;': '\u226e', 'nltri;': '\u22ea', 'nltrie;': '\u22ec', 'nLtv;': '\u226a\u0338', 'nmid;': '\u2224', 'NoBreak;': '\u2060', 'NonBreakingSpace;': '\xa0', 'Nopf;': '\u2115', 'nopf;': '\U0001d55f', 'not': '\xac', 'Not;': '\u2aec', 'not;': '\xac', 'NotCongruent;': '\u2262', 'NotCupCap;': '\u226d', 'NotDoubleVerticalBar;': '\u2226', 'NotElement;': '\u2209', 'NotEqual;': '\u2260', 'NotEqualTilde;': '\u2242\u0338', 'NotExists;': '\u2204', 'NotGreater;': '\u226f', 'NotGreaterEqual;': 
'\u2271', 'NotGreaterFullEqual;': '\u2267\u0338', 'NotGreaterGreater;': '\u226b\u0338', 'NotGreaterLess;': '\u2279', 'NotGreaterSlantEqual;': '\u2a7e\u0338', 'NotGreaterTilde;': '\u2275', 'NotHumpDownHump;': '\u224e\u0338', 'NotHumpEqual;': '\u224f\u0338', 'notin;': '\u2209', 'notindot;': '\u22f5\u0338', 'notinE;': '\u22f9\u0338', 'notinva;': '\u2209', 'notinvb;': '\u22f7', 'notinvc;': '\u22f6', 'NotLeftTriangle;': '\u22ea', 'NotLeftTriangleBar;': '\u29cf\u0338', 'NotLeftTriangleEqual;': '\u22ec', 'NotLess;': '\u226e', 'NotLessEqual;': '\u2270', 'NotLessGreater;': '\u2278', 'NotLessLess;': '\u226a\u0338', 'NotLessSlantEqual;': '\u2a7d\u0338', 'NotLessTilde;': '\u2274', 'NotNestedGreaterGreater;': '\u2aa2\u0338', 'NotNestedLessLess;': '\u2aa1\u0338', 'notni;': '\u220c', 'notniva;': '\u220c', 'notnivb;': '\u22fe', 'notnivc;': '\u22fd', 'NotPrecedes;': '\u2280', 'NotPrecedesEqual;': '\u2aaf\u0338', 'NotPrecedesSlantEqual;': '\u22e0', 'NotReverseElement;': '\u220c', 'NotRightTriangle;': '\u22eb', 'NotRightTriangleBar;': '\u29d0\u0338', 'NotRightTriangleEqual;': '\u22ed', 'NotSquareSubset;': '\u228f\u0338', 'NotSquareSubsetEqual;': '\u22e2', 'NotSquareSuperset;': '\u2290\u0338', 'NotSquareSupersetEqual;': '\u22e3', 'NotSubset;': '\u2282\u20d2', 'NotSubsetEqual;': '\u2288', 'NotSucceeds;': '\u2281', 'NotSucceedsEqual;': '\u2ab0\u0338', 'NotSucceedsSlantEqual;': '\u22e1', 'NotSucceedsTilde;': '\u227f\u0338', 'NotSuperset;': '\u2283\u20d2', 'NotSupersetEqual;': '\u2289', 'NotTilde;': '\u2241', 'NotTildeEqual;': '\u2244', 'NotTildeFullEqual;': '\u2247', 'NotTildeTilde;': '\u2249', 'NotVerticalBar;': '\u2224', 'npar;': '\u2226', 'nparallel;': '\u2226', 'nparsl;': '\u2afd\u20e5', 'npart;': '\u2202\u0338', 'npolint;': '\u2a14', 'npr;': '\u2280', 'nprcue;': '\u22e0', 'npre;': '\u2aaf\u0338', 'nprec;': '\u2280', 'npreceq;': '\u2aaf\u0338', 'nrArr;': '\u21cf', 'nrarr;': '\u219b', 'nrarrc;': '\u2933\u0338', 'nrarrw;': '\u219d\u0338', 'nRightarrow;': '\u21cf', 'nrightarrow;': 
'\u219b', 'nrtri;': '\u22eb', 'nrtrie;': '\u22ed', 'nsc;': '\u2281', 'nsccue;': '\u22e1', 'nsce;': '\u2ab0\u0338', 'Nscr;': '\U0001d4a9', 'nscr;': '\U0001d4c3', 'nshortmid;': '\u2224', 'nshortparallel;': '\u2226', 'nsim;': '\u2241', 'nsime;': '\u2244', 'nsimeq;': '\u2244', 'nsmid;': '\u2224', 'nspar;': '\u2226', 'nsqsube;': '\u22e2', 'nsqsupe;': '\u22e3', 'nsub;': '\u2284', 'nsubE;': '\u2ac5\u0338', 'nsube;': '\u2288', 'nsubset;': '\u2282\u20d2', 'nsubseteq;': '\u2288', 'nsubseteqq;': '\u2ac5\u0338', 'nsucc;': '\u2281', 'nsucceq;': '\u2ab0\u0338', 'nsup;': '\u2285', 'nsupE;': '\u2ac6\u0338', 'nsupe;': '\u2289', 'nsupset;': '\u2283\u20d2', 'nsupseteq;': '\u2289', 'nsupseteqq;': '\u2ac6\u0338', 'ntgl;': '\u2279', 'Ntilde': '\xd1', 'ntilde': '\xf1', 'Ntilde;': '\xd1', 'ntilde;': '\xf1', 'ntlg;': '\u2278', 'ntriangleleft;': '\u22ea', 'ntrianglelefteq;': '\u22ec', 'ntriangleright;': '\u22eb', 'ntrianglerighteq;': '\u22ed', 'Nu;': '\u039d', 'nu;': '\u03bd', 'num;': '#', 'numero;': '\u2116', 'numsp;': '\u2007', 'nvap;': '\u224d\u20d2', 'nVDash;': '\u22af', 'nVdash;': '\u22ae', 'nvDash;': '\u22ad', 'nvdash;': '\u22ac', 'nvge;': '\u2265\u20d2', 'nvgt;': '>\u20d2', 'nvHarr;': '\u2904', 'nvinfin;': '\u29de', 'nvlArr;': '\u2902', 'nvle;': '\u2264\u20d2', 'nvlt;': '<\u20d2', 'nvltrie;': '\u22b4\u20d2', 'nvrArr;': '\u2903', 'nvrtrie;': '\u22b5\u20d2', 'nvsim;': '\u223c\u20d2', 'nwarhk;': '\u2923', 'nwArr;': '\u21d6', 'nwarr;': '\u2196', 'nwarrow;': '\u2196', 'nwnear;': '\u2927', 'Oacute': '\xd3', 'oacute': '\xf3', 'Oacute;': '\xd3', 'oacute;': '\xf3', 'oast;': '\u229b', 'ocir;': '\u229a', 'Ocirc': '\xd4', 'ocirc': '\xf4', 'Ocirc;': '\xd4', 'ocirc;': '\xf4', 'Ocy;': '\u041e', 'ocy;': '\u043e', 'odash;': '\u229d', 'Odblac;': '\u0150', 'odblac;': '\u0151', 'odiv;': '\u2a38', 'odot;': '\u2299', 'odsold;': '\u29bc', 'OElig;': '\u0152', 'oelig;': '\u0153', 'ofcir;': '\u29bf', 'Ofr;': '\U0001d512', 'ofr;': '\U0001d52c', 'ogon;': '\u02db', 'Ograve': '\xd2', 'ograve': '\xf2', 'Ograve;': 
'\xd2', 'ograve;': '\xf2', 'ogt;': '\u29c1', 'ohbar;': '\u29b5', 'ohm;': '\u03a9', 'oint;': '\u222e', 'olarr;': '\u21ba', 'olcir;': '\u29be', 'olcross;': '\u29bb', 'oline;': '\u203e', 'olt;': '\u29c0', 'Omacr;': '\u014c', 'omacr;': '\u014d', 'Omega;': '\u03a9', 'omega;': '\u03c9', 'Omicron;': '\u039f', 'omicron;': '\u03bf', 'omid;': '\u29b6', 'ominus;': '\u2296', 'Oopf;': '\U0001d546', 'oopf;': '\U0001d560', 'opar;': '\u29b7', 'OpenCurlyDoubleQuote;': '\u201c', 'OpenCurlyQuote;': '\u2018', 'operp;': '\u29b9', 'oplus;': '\u2295', 'Or;': '\u2a54', 'or;': '\u2228', 'orarr;': '\u21bb', 'ord;': '\u2a5d', 'order;': '\u2134', 'orderof;': '\u2134', 'ordf': '\xaa', 'ordf;': '\xaa', 'ordm': '\xba', 'ordm;': '\xba', 'origof;': '\u22b6', 'oror;': '\u2a56', 'orslope;': '\u2a57', 'orv;': '\u2a5b', 'oS;': '\u24c8', 'Oscr;': '\U0001d4aa', 'oscr;': '\u2134', 'Oslash': '\xd8', 'oslash': '\xf8', 'Oslash;': '\xd8', 'oslash;': '\xf8', 'osol;': '\u2298', 'Otilde': '\xd5', 'otilde': '\xf5', 'Otilde;': '\xd5', 'otilde;': '\xf5', 'Otimes;': '\u2a37', 'otimes;': '\u2297', 'otimesas;': '\u2a36', 'Ouml': '\xd6', 'ouml': '\xf6', 'Ouml;': '\xd6', 'ouml;': '\xf6', 'ovbar;': '\u233d', 'OverBar;': '\u203e', 'OverBrace;': '\u23de', 'OverBracket;': '\u23b4', 'OverParenthesis;': '\u23dc', 'par;': '\u2225', 'para': '\xb6', 'para;': '\xb6', 'parallel;': '\u2225', 'parsim;': '\u2af3', 'parsl;': '\u2afd', 'part;': '\u2202', 'PartialD;': '\u2202', 'Pcy;': '\u041f', 'pcy;': '\u043f', 'percnt;': '%', 'period;': '.', 'permil;': '\u2030', 'perp;': '\u22a5', 'pertenk;': '\u2031', 'Pfr;': '\U0001d513', 'pfr;': '\U0001d52d', 'Phi;': '\u03a6', 'phi;': '\u03c6', 'phiv;': '\u03d5', 'phmmat;': '\u2133', 'phone;': '\u260e', 'Pi;': '\u03a0', 'pi;': '\u03c0', 'pitchfork;': '\u22d4', 'piv;': '\u03d6', 'planck;': '\u210f', 'planckh;': '\u210e', 'plankv;': '\u210f', 'plus;': '+', 'plusacir;': '\u2a23', 'plusb;': '\u229e', 'pluscir;': '\u2a22', 'plusdo;': '\u2214', 'plusdu;': '\u2a25', 'pluse;': '\u2a72', 'PlusMinus;': 
'\xb1', 'plusmn': '\xb1', 'plusmn;': '\xb1', 'plussim;': '\u2a26', 'plustwo;': '\u2a27', 'pm;': '\xb1', 'Poincareplane;': '\u210c', 'pointint;': '\u2a15', 'Popf;': '\u2119', 'popf;': '\U0001d561', 'pound': '\xa3', 'pound;': '\xa3', 'Pr;': '\u2abb', 'pr;': '\u227a', 'prap;': '\u2ab7', 'prcue;': '\u227c', 'prE;': '\u2ab3', 'pre;': '\u2aaf', 'prec;': '\u227a', 'precapprox;': '\u2ab7', 'preccurlyeq;': '\u227c', 'Precedes;': '\u227a', 'PrecedesEqual;': '\u2aaf', 'PrecedesSlantEqual;': '\u227c', 'PrecedesTilde;': '\u227e', 'preceq;': '\u2aaf', 'precnapprox;': '\u2ab9', 'precneqq;': '\u2ab5', 'precnsim;': '\u22e8', 'precsim;': '\u227e', 'Prime;': '\u2033', 'prime;': '\u2032', 'primes;': '\u2119', 'prnap;': '\u2ab9', 'prnE;': '\u2ab5', 'prnsim;': '\u22e8', 'prod;': '\u220f', 'Product;': '\u220f', 'profalar;': '\u232e', 'profline;': '\u2312', 'profsurf;': '\u2313', 'prop;': '\u221d', 'Proportion;': '\u2237', 'Proportional;': '\u221d', 'propto;': '\u221d', 'prsim;': '\u227e', 'prurel;': '\u22b0', 'Pscr;': '\U0001d4ab', 'pscr;': '\U0001d4c5', 'Psi;': '\u03a8', 'psi;': '\u03c8', 'puncsp;': '\u2008', 'Qfr;': '\U0001d514', 'qfr;': '\U0001d52e', 'qint;': '\u2a0c', 'Qopf;': '\u211a', 'qopf;': '\U0001d562', 'qprime;': '\u2057', 'Qscr;': '\U0001d4ac', 'qscr;': '\U0001d4c6', 'quaternions;': '\u210d', 'quatint;': '\u2a16', 'quest;': '?', 'questeq;': '\u225f', 'QUOT': '"', 'quot': '"', 'QUOT;': '"', 'quot;': '"', 'rAarr;': '\u21db', 'race;': '\u223d\u0331', 'Racute;': '\u0154', 'racute;': '\u0155', 'radic;': '\u221a', 'raemptyv;': '\u29b3', 'Rang;': '\u27eb', 'rang;': '\u27e9', 'rangd;': '\u2992', 'range;': '\u29a5', 'rangle;': '\u27e9', 'raquo': '\xbb', 'raquo;': '\xbb', 'Rarr;': '\u21a0', 'rArr;': '\u21d2', 'rarr;': '\u2192', 'rarrap;': '\u2975', 'rarrb;': '\u21e5', 'rarrbfs;': '\u2920', 'rarrc;': '\u2933', 'rarrfs;': '\u291e', 'rarrhk;': '\u21aa', 'rarrlp;': '\u21ac', 'rarrpl;': '\u2945', 'rarrsim;': '\u2974', 'Rarrtl;': '\u2916', 'rarrtl;': '\u21a3', 'rarrw;': '\u219d', 'rAtail;': 
'\u291c', 'ratail;': '\u291a', 'ratio;': '\u2236', 'rationals;': '\u211a', 'RBarr;': '\u2910', 'rBarr;': '\u290f', 'rbarr;': '\u290d', 'rbbrk;': '\u2773', 'rbrace;': '}', 'rbrack;': ']', 'rbrke;': '\u298c', 'rbrksld;': '\u298e', 'rbrkslu;': '\u2990', 'Rcaron;': '\u0158', 'rcaron;': '\u0159', 'Rcedil;': '\u0156', 'rcedil;': '\u0157', 'rceil;': '\u2309', 'rcub;': '}', 'Rcy;': '\u0420', 'rcy;': '\u0440', 'rdca;': '\u2937', 'rdldhar;': '\u2969', 'rdquo;': '\u201d', 'rdquor;': '\u201d', 'rdsh;': '\u21b3', 'Re;': '\u211c', 'real;': '\u211c', 'realine;': '\u211b', 'realpart;': '\u211c', 'reals;': '\u211d', 'rect;': '\u25ad', 'REG': '\xae', 'reg': '\xae', 'REG;': '\xae', 'reg;': '\xae', 'ReverseElement;': '\u220b', 'ReverseEquilibrium;': '\u21cb', 'ReverseUpEquilibrium;': '\u296f', 'rfisht;': '\u297d', 'rfloor;': '\u230b', 'Rfr;': '\u211c', 'rfr;': '\U0001d52f', 'rHar;': '\u2964', 'rhard;': '\u21c1', 'rharu;': '\u21c0', 'rharul;': '\u296c', 'Rho;': '\u03a1', 'rho;': '\u03c1', 'rhov;': '\u03f1', 'RightAngleBracket;': '\u27e9', 'RightArrow;': '\u2192', 'Rightarrow;': '\u21d2', 'rightarrow;': '\u2192', 'RightArrowBar;': '\u21e5', 'RightArrowLeftArrow;': '\u21c4', 'rightarrowtail;': '\u21a3', 'RightCeiling;': '\u2309', 'RightDoubleBracket;': '\u27e7', 'RightDownTeeVector;': '\u295d', 'RightDownVector;': '\u21c2', 'RightDownVectorBar;': '\u2955', 'RightFloor;': '\u230b', 'rightharpoondown;': '\u21c1', 'rightharpoonup;': '\u21c0', 'rightleftarrows;': '\u21c4', 'rightleftharpoons;': '\u21cc', 'rightrightarrows;': '\u21c9', 'rightsquigarrow;': '\u219d', 'RightTee;': '\u22a2', 'RightTeeArrow;': '\u21a6', 'RightTeeVector;': '\u295b', 'rightthreetimes;': '\u22cc', 'RightTriangle;': '\u22b3', 'RightTriangleBar;': '\u29d0', 'RightTriangleEqual;': '\u22b5', 'RightUpDownVector;': '\u294f', 'RightUpTeeVector;': '\u295c', 'RightUpVector;': '\u21be', 'RightUpVectorBar;': '\u2954', 'RightVector;': '\u21c0', 'RightVectorBar;': '\u2953', 'ring;': '\u02da', 'risingdotseq;': '\u2253', 'rlarr;': 
'\u21c4', 'rlhar;': '\u21cc', 'rlm;': '\u200f', 'rmoust;': '\u23b1', 'rmoustache;': '\u23b1', 'rnmid;': '\u2aee', 'roang;': '\u27ed', 'roarr;': '\u21fe', 'robrk;': '\u27e7', 'ropar;': '\u2986', 'Ropf;': '\u211d', 'ropf;': '\U0001d563', 'roplus;': '\u2a2e', 'rotimes;': '\u2a35', 'RoundImplies;': '\u2970', 'rpar;': ')', 'rpargt;': '\u2994', 'rppolint;': '\u2a12', 'rrarr;': '\u21c9', 'Rrightarrow;': '\u21db', 'rsaquo;': '\u203a', 'Rscr;': '\u211b', 'rscr;': '\U0001d4c7', 'Rsh;': '\u21b1', 'rsh;': '\u21b1', 'rsqb;': ']', 'rsquo;': '\u2019', 'rsquor;': '\u2019', 'rthree;': '\u22cc', 'rtimes;': '\u22ca', 'rtri;': '\u25b9', 'rtrie;': '\u22b5', 'rtrif;': '\u25b8', 'rtriltri;': '\u29ce', 'RuleDelayed;': '\u29f4', 'ruluhar;': '\u2968', 'rx;': '\u211e', 'Sacute;': '\u015a', 'sacute;': '\u015b', 'sbquo;': '\u201a', 'Sc;': '\u2abc', 'sc;': '\u227b', 'scap;': '\u2ab8', 'Scaron;': '\u0160', 'scaron;': '\u0161', 'sccue;': '\u227d', 'scE;': '\u2ab4', 'sce;': '\u2ab0', 'Scedil;': '\u015e', 'scedil;': '\u015f', 'Scirc;': '\u015c', 'scirc;': '\u015d', 'scnap;': '\u2aba', 'scnE;': '\u2ab6', 'scnsim;': '\u22e9', 'scpolint;': '\u2a13', 'scsim;': '\u227f', 'Scy;': '\u0421', 'scy;': '\u0441', 'sdot;': '\u22c5', 'sdotb;': '\u22a1', 'sdote;': '\u2a66', 'searhk;': '\u2925', 'seArr;': '\u21d8', 'searr;': '\u2198', 'searrow;': '\u2198', 'sect': '\xa7', 'sect;': '\xa7', 'semi;': ';', 'seswar;': '\u2929', 'setminus;': '\u2216', 'setmn;': '\u2216', 'sext;': '\u2736', 'Sfr;': '\U0001d516', 'sfr;': '\U0001d530', 'sfrown;': '\u2322', 'sharp;': '\u266f', 'SHCHcy;': '\u0429', 'shchcy;': '\u0449', 'SHcy;': '\u0428', 'shcy;': '\u0448', 'ShortDownArrow;': '\u2193', 'ShortLeftArrow;': '\u2190', 'shortmid;': '\u2223', 'shortparallel;': '\u2225', 'ShortRightArrow;': '\u2192', 'ShortUpArrow;': '\u2191', 'shy': '\xad', 'shy;': '\xad', 'Sigma;': '\u03a3', 'sigma;': '\u03c3', 'sigmaf;': '\u03c2', 'sigmav;': '\u03c2', 'sim;': '\u223c', 'simdot;': '\u2a6a', 'sime;': '\u2243', 'simeq;': '\u2243', 'simg;': '\u2a9e', 
'simgE;': '\u2aa0', 'siml;': '\u2a9d', 'simlE;': '\u2a9f', 'simne;': '\u2246', 'simplus;': '\u2a24', 'simrarr;': '\u2972', 'slarr;': '\u2190', 'SmallCircle;': '\u2218', 'smallsetminus;': '\u2216', 'smashp;': '\u2a33', 'smeparsl;': '\u29e4', 'smid;': '\u2223', 'smile;': '\u2323', 'smt;': '\u2aaa', 'smte;': '\u2aac', 'smtes;': '\u2aac\ufe00', 'SOFTcy;': '\u042c', 'softcy;': '\u044c', 'sol;': '/', 'solb;': '\u29c4', 'solbar;': '\u233f', 'Sopf;': '\U0001d54a', 'sopf;': '\U0001d564', 'spades;': '\u2660', 'spadesuit;': '\u2660', 'spar;': '\u2225', 'sqcap;': '\u2293', 'sqcaps;': '\u2293\ufe00', 'sqcup;': '\u2294', 'sqcups;': '\u2294\ufe00', 'Sqrt;': '\u221a', 'sqsub;': '\u228f', 'sqsube;': '\u2291', 'sqsubset;': '\u228f', 'sqsubseteq;': '\u2291', 'sqsup;': '\u2290', 'sqsupe;': '\u2292', 'sqsupset;': '\u2290', 'sqsupseteq;': '\u2292', 'squ;': '\u25a1', 'Square;': '\u25a1', 'square;': '\u25a1', 'SquareIntersection;': '\u2293', 'SquareSubset;': '\u228f', 'SquareSubsetEqual;': '\u2291', 'SquareSuperset;': '\u2290', 'SquareSupersetEqual;': '\u2292', 'SquareUnion;': '\u2294', 'squarf;': '\u25aa', 'squf;': '\u25aa', 'srarr;': '\u2192', 'Sscr;': '\U0001d4ae', 'sscr;': '\U0001d4c8', 'ssetmn;': '\u2216', 'ssmile;': '\u2323', 'sstarf;': '\u22c6', 'Star;': '\u22c6', 'star;': '\u2606', 'starf;': '\u2605', 'straightepsilon;': '\u03f5', 'straightphi;': '\u03d5', 'strns;': '\xaf', 'Sub;': '\u22d0', 'sub;': '\u2282', 'subdot;': '\u2abd', 'subE;': '\u2ac5', 'sube;': '\u2286', 'subedot;': '\u2ac3', 'submult;': '\u2ac1', 'subnE;': '\u2acb', 'subne;': '\u228a', 'subplus;': '\u2abf', 'subrarr;': '\u2979', 'Subset;': '\u22d0', 'subset;': '\u2282', 'subseteq;': '\u2286', 'subseteqq;': '\u2ac5', 'SubsetEqual;': '\u2286', 'subsetneq;': '\u228a', 'subsetneqq;': '\u2acb', 'subsim;': '\u2ac7', 'subsub;': '\u2ad5', 'subsup;': '\u2ad3', 'succ;': '\u227b', 'succapprox;': '\u2ab8', 'succcurlyeq;': '\u227d', 'Succeeds;': '\u227b', 'SucceedsEqual;': '\u2ab0', 'SucceedsSlantEqual;': '\u227d', 
'SucceedsTilde;': '\u227f', 'succeq;': '\u2ab0', 'succnapprox;': '\u2aba', 'succneqq;': '\u2ab6', 'succnsim;': '\u22e9', 'succsim;': '\u227f', 'SuchThat;': '\u220b', 'Sum;': '\u2211', 'sum;': '\u2211', 'sung;': '\u266a', 'sup1': '\xb9', 'sup1;': '\xb9', 'sup2': '\xb2', 'sup2;': '\xb2', 'sup3': '\xb3', 'sup3;': '\xb3', 'Sup;': '\u22d1', 'sup;': '\u2283', 'supdot;': '\u2abe', 'supdsub;': '\u2ad8', 'supE;': '\u2ac6', 'supe;': '\u2287', 'supedot;': '\u2ac4', 'Superset;': '\u2283', 'SupersetEqual;': '\u2287', 'suphsol;': '\u27c9', 'suphsub;': '\u2ad7', 'suplarr;': '\u297b', 'supmult;': '\u2ac2', 'supnE;': '\u2acc', 'supne;': '\u228b', 'supplus;': '\u2ac0', 'Supset;': '\u22d1', 'supset;': '\u2283', 'supseteq;': '\u2287', 'supseteqq;': '\u2ac6', 'supsetneq;': '\u228b', 'supsetneqq;': '\u2acc', 'supsim;': '\u2ac8', 'supsub;': '\u2ad4', 'supsup;': '\u2ad6', 'swarhk;': '\u2926', 'swArr;': '\u21d9', 'swarr;': '\u2199', 'swarrow;': '\u2199', 'swnwar;': '\u292a', 'szlig': '\xdf', 'szlig;': '\xdf', 'Tab;': '\t', 'target;': '\u2316', 'Tau;': '\u03a4', 'tau;': '\u03c4', 'tbrk;': '\u23b4', 'Tcaron;': '\u0164', 'tcaron;': '\u0165', 'Tcedil;': '\u0162', 'tcedil;': '\u0163', 'Tcy;': '\u0422', 'tcy;': '\u0442', 'tdot;': '\u20db', 'telrec;': '\u2315', 'Tfr;': '\U0001d517', 'tfr;': '\U0001d531', 'there4;': '\u2234', 'Therefore;': '\u2234', 'therefore;': '\u2234', 'Theta;': '\u0398', 'theta;': '\u03b8', 'thetasym;': '\u03d1', 'thetav;': '\u03d1', 'thickapprox;': '\u2248', 'thicksim;': '\u223c', 'ThickSpace;': '\u205f\u200a', 'thinsp;': '\u2009', 'ThinSpace;': '\u2009', 'thkap;': '\u2248', 'thksim;': '\u223c', 'THORN': '\xde', 'thorn': '\xfe', 'THORN;': '\xde', 'thorn;': '\xfe', 'Tilde;': '\u223c', 'tilde;': '\u02dc', 'TildeEqual;': '\u2243', 'TildeFullEqual;': '\u2245', 'TildeTilde;': '\u2248', 'times': '\xd7', 'times;': '\xd7', 'timesb;': '\u22a0', 'timesbar;': '\u2a31', 'timesd;': '\u2a30', 'tint;': '\u222d', 'toea;': '\u2928', 'top;': '\u22a4', 'topbot;': '\u2336', 'topcir;': '\u2af1', 
'Topf;': '\U0001d54b', 'topf;': '\U0001d565', 'topfork;': '\u2ada', 'tosa;': '\u2929', 'tprime;': '\u2034', 'TRADE;': '\u2122', 'trade;': '\u2122', 'triangle;': '\u25b5', 'triangledown;': '\u25bf', 'triangleleft;': '\u25c3', 'trianglelefteq;': '\u22b4', 'triangleq;': '\u225c', 'triangleright;': '\u25b9', 'trianglerighteq;': '\u22b5', 'tridot;': '\u25ec', 'trie;': '\u225c', 'triminus;': '\u2a3a', 'TripleDot;': '\u20db', 'triplus;': '\u2a39', 'trisb;': '\u29cd', 'tritime;': '\u2a3b', 'trpezium;': '\u23e2', 'Tscr;': '\U0001d4af', 'tscr;': '\U0001d4c9', 'TScy;': '\u0426', 'tscy;': '\u0446', 'TSHcy;': '\u040b', 'tshcy;': '\u045b', 'Tstrok;': '\u0166', 'tstrok;': '\u0167', 'twixt;': '\u226c', 'twoheadleftarrow;': '\u219e', 'twoheadrightarrow;': '\u21a0', 'Uacute': '\xda', 'uacute': '\xfa', 'Uacute;': '\xda', 'uacute;': '\xfa', 'Uarr;': '\u219f', 'uArr;': '\u21d1', 'uarr;': '\u2191', 'Uarrocir;': '\u2949', 'Ubrcy;': '\u040e', 'ubrcy;': '\u045e', 'Ubreve;': '\u016c', 'ubreve;': '\u016d', 'Ucirc': '\xdb', 'ucirc': '\xfb', 'Ucirc;': '\xdb', 'ucirc;': '\xfb', 'Ucy;': '\u0423', 'ucy;': '\u0443', 'udarr;': '\u21c5', 'Udblac;': '\u0170', 'udblac;': '\u0171', 'udhar;': '\u296e', 'ufisht;': '\u297e', 'Ufr;': '\U0001d518', 'ufr;': '\U0001d532', 'Ugrave': '\xd9', 'ugrave': '\xf9', 'Ugrave;': '\xd9', 'ugrave;': '\xf9', 'uHar;': '\u2963', 'uharl;': '\u21bf', 'uharr;': '\u21be', 'uhblk;': '\u2580', 'ulcorn;': '\u231c', 'ulcorner;': '\u231c', 'ulcrop;': '\u230f', 'ultri;': '\u25f8', 'Umacr;': '\u016a', 'umacr;': '\u016b', 'uml': '\xa8', 'uml;': '\xa8', 'UnderBar;': '_', 'UnderBrace;': '\u23df', 'UnderBracket;': '\u23b5', 'UnderParenthesis;': '\u23dd', 'Union;': '\u22c3', 'UnionPlus;': '\u228e', 'Uogon;': '\u0172', 'uogon;': '\u0173', 'Uopf;': '\U0001d54c', 'uopf;': '\U0001d566', 'UpArrow;': '\u2191', 'Uparrow;': '\u21d1', 'uparrow;': '\u2191', 'UpArrowBar;': '\u2912', 'UpArrowDownArrow;': '\u21c5', 'UpDownArrow;': '\u2195', 'Updownarrow;': '\u21d5', 'updownarrow;': '\u2195', 
'UpEquilibrium;': '\u296e', 'upharpoonleft;': '\u21bf', 'upharpoonright;': '\u21be', 'uplus;': '\u228e', 'UpperLeftArrow;': '\u2196', 'UpperRightArrow;': '\u2197', 'Upsi;': '\u03d2', 'upsi;': '\u03c5', 'upsih;': '\u03d2', 'Upsilon;': '\u03a5', 'upsilon;': '\u03c5', 'UpTee;': '\u22a5', 'UpTeeArrow;': '\u21a5', 'upuparrows;': '\u21c8', 'urcorn;': '\u231d', 'urcorner;': '\u231d', 'urcrop;': '\u230e', 'Uring;': '\u016e', 'uring;': '\u016f', 'urtri;': '\u25f9', 'Uscr;': '\U0001d4b0', 'uscr;': '\U0001d4ca', 'utdot;': '\u22f0', 'Utilde;': '\u0168', 'utilde;': '\u0169', 'utri;': '\u25b5', 'utrif;': '\u25b4', 'uuarr;': '\u21c8', 'Uuml': '\xdc', 'uuml': '\xfc', 'Uuml;': '\xdc', 'uuml;': '\xfc', 'uwangle;': '\u29a7', 'vangrt;': '\u299c', 'varepsilon;': '\u03f5', 'varkappa;': '\u03f0', 'varnothing;': '\u2205', 'varphi;': '\u03d5', 'varpi;': '\u03d6', 'varpropto;': '\u221d', 'vArr;': '\u21d5', 'varr;': '\u2195', 'varrho;': '\u03f1', 'varsigma;': '\u03c2', 'varsubsetneq;': '\u228a\ufe00', 'varsubsetneqq;': '\u2acb\ufe00', 'varsupsetneq;': '\u228b\ufe00', 'varsupsetneqq;': '\u2acc\ufe00', 'vartheta;': '\u03d1', 'vartriangleleft;': '\u22b2', 'vartriangleright;': '\u22b3', 'Vbar;': '\u2aeb', 'vBar;': '\u2ae8', 'vBarv;': '\u2ae9', 'Vcy;': '\u0412', 'vcy;': '\u0432', 'VDash;': '\u22ab', 'Vdash;': '\u22a9', 'vDash;': '\u22a8', 'vdash;': '\u22a2', 'Vdashl;': '\u2ae6', 'Vee;': '\u22c1', 'vee;': '\u2228', 'veebar;': '\u22bb', 'veeeq;': '\u225a', 'vellip;': '\u22ee', 'Verbar;': '\u2016', 'verbar;': '|', 'Vert;': '\u2016', 'vert;': '|', 'VerticalBar;': '\u2223', 'VerticalLine;': '|', 'VerticalSeparator;': '\u2758', 'VerticalTilde;': '\u2240', 'VeryThinSpace;': '\u200a', 'Vfr;': '\U0001d519', 'vfr;': '\U0001d533', 'vltri;': '\u22b2', 'vnsub;': '\u2282\u20d2', 'vnsup;': '\u2283\u20d2', 'Vopf;': '\U0001d54d', 'vopf;': '\U0001d567', 'vprop;': '\u221d', 'vrtri;': '\u22b3', 'Vscr;': '\U0001d4b1', 'vscr;': '\U0001d4cb', 'vsubnE;': '\u2acb\ufe00', 'vsubne;': '\u228a\ufe00', 'vsupnE;': 
'\u2acc\ufe00', 'vsupne;': '\u228b\ufe00', 'Vvdash;': '\u22aa', 'vzigzag;': '\u299a', 'Wcirc;': '\u0174', 'wcirc;': '\u0175', 'wedbar;': '\u2a5f', 'Wedge;': '\u22c0', 'wedge;': '\u2227', 'wedgeq;': '\u2259', 'weierp;': '\u2118', 'Wfr;': '\U0001d51a', 'wfr;': '\U0001d534', 'Wopf;': '\U0001d54e', 'wopf;': '\U0001d568', 'wp;': '\u2118', 'wr;': '\u2240', 'wreath;': '\u2240', 'Wscr;': '\U0001d4b2', 'wscr;': '\U0001d4cc', 'xcap;': '\u22c2', 'xcirc;': '\u25ef', 'xcup;': '\u22c3', 'xdtri;': '\u25bd', 'Xfr;': '\U0001d51b', 'xfr;': '\U0001d535', 'xhArr;': '\u27fa', 'xharr;': '\u27f7', 'Xi;': '\u039e', 'xi;': '\u03be', 'xlArr;': '\u27f8', 'xlarr;': '\u27f5', 'xmap;': '\u27fc', 'xnis;': '\u22fb', 'xodot;': '\u2a00', 'Xopf;': '\U0001d54f', 'xopf;': '\U0001d569', 'xoplus;': '\u2a01', 'xotime;': '\u2a02', 'xrArr;': '\u27f9', 'xrarr;': '\u27f6', 'Xscr;': '\U0001d4b3', 'xscr;': '\U0001d4cd', 'xsqcup;': '\u2a06', 'xuplus;': '\u2a04', 'xutri;': '\u25b3', 'xvee;': '\u22c1', 'xwedge;': '\u22c0', 'Yacute': '\xdd', 'yacute': '\xfd', 'Yacute;': '\xdd', 'yacute;': '\xfd', 'YAcy;': '\u042f', 'yacy;': '\u044f', 'Ycirc;': '\u0176', 'ycirc;': '\u0177', 'Ycy;': '\u042b', 'ycy;': '\u044b', 'yen': '\xa5', 'yen;': '\xa5', 'Yfr;': '\U0001d51c', 'yfr;': '\U0001d536', 'YIcy;': '\u0407', 'yicy;': '\u0457', 'Yopf;': '\U0001d550', 'yopf;': '\U0001d56a', 'Yscr;': '\U0001d4b4', 'yscr;': '\U0001d4ce', 'YUcy;': '\u042e', 'yucy;': '\u044e', 'yuml': '\xff', 'Yuml;': '\u0178', 'yuml;': '\xff', 'Zacute;': '\u0179', 'zacute;': '\u017a', 'Zcaron;': '\u017d', 'zcaron;': '\u017e', 'Zcy;': '\u0417', 'zcy;': '\u0437', 'Zdot;': '\u017b', 'zdot;': '\u017c', 'zeetrf;': '\u2128', 'ZeroWidthSpace;': '\u200b', 'Zeta;': '\u0396', 'zeta;': '\u03b6', 'Zfr;': '\u2128', 'zfr;': '\U0001d537', 'ZHcy;': '\u0416', 'zhcy;': '\u0436', 'zigrarr;': '\u21dd', 'Zopf;': '\u2124', 'zopf;': '\U0001d56b', 'Zscr;': '\U0001d4b5', 'zscr;': '\U0001d4cf', 'zwj;': '\u200d', 'zwnj;': '\u200c', } try: import http.client as compat_http_client except 
ImportError: # Python 2 import httplib as compat_http_client try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve try: from html.parser import HTMLParser as compat_HTMLParser except ImportError: # Python 2 from HTMLParser import HTMLParser as compat_HTMLParser try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') try: import http.server as compat_http_server except ImportError: import BaseHTTPServer as compat_http_server try: compat_str = unicode # Python 2 except NameError: compat_str = str try: from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes from urllib.parse import unquote as compat_urllib_parse_unquote from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus except ImportError: # Python 2 _asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire') else re.compile('([\x00-\x7f]+)')) # HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus # implementations from cpython 3.4.3's stdlib. Python 2's version # is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244) def compat_urllib_parse_unquote_to_bytes(string): """unquote_to_bytes('abc%20def') -> b'abc def'.""" # Note: strings are encoded as UTF-8. This is only an issue if it contains # unescaped non-ASCII characters, which URIs should not. if not string: # Is it a string-like object? 
string.split return b'' if isinstance(string, compat_str): string = string.encode('utf-8') bits = string.split(b'%') if len(bits) == 1: return string res = [bits[0]] append = res.append for item in bits[1:]: try: append(compat_urllib_parse._hextochr[item[:2]]) append(item[2:]) except KeyError: append(b'%') append(item) return b''.join(res) def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'): """Replace %xx escapes by their single-character equivalent. The optional encoding and errors parameters specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. By default, percent-encoded sequences are decoded with UTF-8, and invalid sequences are replaced by a placeholder character. unquote('abc%20def') -> 'abc def'. """ if '%' not in string: string.split return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' bits = _asciire.split(string) res = [bits[0]] append = res.append for i in range(1, len(bits), 2): append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors)) append(bits[i + 1]) return ''.join(res) def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'): """Like unquote(), but also replace plus signs by spaces, as required for unquoting HTML form values. unquote_plus('%7e/abc+def') -> '~/abc def' """ string = string.replace('+', ' ') return compat_urllib_parse_unquote(string, encoding, errors) try: from urllib.parse import urlencode as compat_urllib_parse_urlencode except ImportError: # Python 2 # Python 2 will choke in urlencode on mixture of byte and unicode strings. # Possible solutions are to either port it from python 3 with all # the friends or manually ensure input query contains only byte strings. # We will stick with latter thus recursively encoding the whole query. 
def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'): def encode_elem(e): if isinstance(e, dict): e = encode_dict(e) elif isinstance(e, (list, tuple,)): list_e = encode_list(e) e = tuple(list_e) if isinstance(e, tuple) else list_e elif isinstance(e, compat_str): e = e.encode(encoding) return e def encode_dict(d): return dict((encode_elem(k), encode_elem(v)) for k, v in d.items()) def encode_list(l): return [encode_elem(e) for e in l] return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq) try: from urllib.request import DataHandler as compat_urllib_request_DataHandler except ImportError: # Python < 3.4 # Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler): def data_open(self, req): # data URLs as specified in RFC 2397. # # ignores POSTed data # # syntax: # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data # mediatype := [ type "/" subtype ] *( ";" parameter ) # data := *urlchar # parameter := attribute "=" value url = req.get_full_url() scheme, data = url.split(':', 1) mediatype, data = data.split(',', 1) # even base64 encoded data URLs might be quoted so unquote in any case: data = compat_urllib_parse_unquote_to_bytes(data) if mediatype.endswith(';base64'): data = binascii.a2b_base64(data) mediatype = mediatype[:-7] if not mediatype: mediatype = 'text/plain;charset=US-ASCII' headers = email.message_from_string( 'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data))) return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url) try: compat_basestring = basestring # Python 2 except NameError: compat_basestring = str try: compat_chr = unichr # Python 2 except NameError: compat_chr = chr try: from xml.etree.ElementTree import ParseError as compat_xml_parse_error except ImportError: # Python 2.6 from xml.parsers.expat import ExpatError as compat_xml_parse_error etree = xml.etree.ElementTree class _TreeBuilder(etree.TreeBuilder): 
def doctype(self, name, pubid, system): pass if sys.version_info[0] >= 3: def compat_etree_fromstring(text): return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder())) else: # python 2.x tries to encode unicode strings with ascii (see the # XMLParser._fixtext method) try: _etree_iter = etree.Element.iter except AttributeError: # Python <=2.6 def _etree_iter(root): for el in root.findall('*'): yield el for sub in _etree_iter(el): yield sub # on 2.6 XML doesn't have a parser argument, function copied from CPython # 2.7 source def _XML(text, parser=None): if not parser: parser = etree.XMLParser(target=_TreeBuilder()) parser.feed(text) return parser.close() def _element_factory(*args, **kwargs): el = etree.Element(*args, **kwargs) for k, v in el.items(): if isinstance(v, bytes): el.set(k, v.decode('utf-8')) return el def compat_etree_fromstring(text): doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory))) for el in _etree_iter(doc): if el.text is not None and isinstance(el.text, bytes): el.text = el.text.decode('utf-8') return doc if sys.version_info < (2, 7): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! def compat_xpath(xpath): if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') return xpath else: compat_xpath = lambda xpath: xpath try: from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. 
# Python 2's version is apparently totally broken def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): qs, _coerce_result = qs, compat_str pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError('bad query field: %r' % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = compat_urllib_parse_unquote( name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = compat_urllib_parse_unquote( value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result try: from shlex import quote as compat_shlex_quote except ImportError: # Python < 3.3 def compat_shlex_quote(s): if re.match(r'^[-_\w./]+$', s): return s else: return "'" + s.replace("'", "'\"'\"'") + "'" try: args = shlex.split('中文') assert (isinstance(args, list) and isinstance(args[0], compat_str) and args[0] == '中文') compat_shlex_split = shlex.split except (AssertionError, UnicodeEncodeError): # Working around shlex issue with unicode strings on some python 2 # versions (see http://bugs.python.org/issue1548891) def compat_shlex_split(s, comments=False, posix=True): if isinstance(s, compat_str): s = s.encode('utf-8') return list(map(lambda s: s.decode('utf-8'), shlex.split(s, comments, posix))) def compat_ord(c): if type(c) 
is int: return c else: return ord(c) compat_os_name = os._name if os.name == 'java' else os.name if sys.version_info >= (3, 0): compat_getenv = os.getenv compat_expanduser = os.path.expanduser def compat_setenv(key, value, env=os.environ): env[key] = value else: # Environment variables should be decoded with filesystem encoding. # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918) def compat_getenv(key, default=None): from .utils import get_filesystem_encoding env = os.getenv(key, default) if env: env = env.decode(get_filesystem_encoding()) return env def compat_setenv(key, value, env=os.environ): def encode(v): from .utils import get_filesystem_encoding return v.encode(get_filesystem_encoding()) if isinstance(v, compat_str) else v env[encode(key)] = encode(value) # HACK: The default implementations of os.path.expanduser from cpython do not decode # environment variables with filesystem encoding. We will work around this by # providing adjusted implementations. # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib # for different platforms with correct environment variables decoding. if compat_os_name == 'posix': def compat_expanduser(path): """Expand ~ and ~user constructions. If user or $HOME is unknown, do nothing.""" if not path.startswith('~'): return path i = path.find('/', 1) if i < 0: i = len(path) if i == 1: if 'HOME' not in os.environ: import pwd userhome = pwd.getpwuid(os.getuid()).pw_dir else: userhome = compat_getenv('HOME') else: import pwd try: pwent = pwd.getpwnam(path[1:i]) except KeyError: return path userhome = pwent.pw_dir userhome = userhome.rstrip('/') return (userhome + path[i:]) or '/' elif compat_os_name == 'nt' or compat_os_name == 'ce': def compat_expanduser(path): """Expand ~ and ~user constructs. 
If user or $HOME is unknown, do nothing.""" if path[:1] != '~': return path i, n = 1, len(path) while i < n and path[i] not in '/\\': i = i + 1 if 'HOME' in os.environ: userhome = compat_getenv('HOME') elif 'USERPROFILE' in os.environ: userhome = compat_getenv('USERPROFILE') elif 'HOMEPATH' not in os.environ: return path else: try: drive = compat_getenv('HOMEDRIVE') except KeyError: drive = '' userhome = os.path.join(drive, compat_getenv('HOMEPATH')) if i != 1: # ~user userhome = os.path.join(os.path.dirname(userhome), path[1:i]) return userhome + path[i:] else: compat_expanduser = os.path.expanduser if sys.version_info < (3, 0): def compat_print(s): from .utils import preferredencoding print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): assert isinstance(s, compat_str) print(s) if sys.version_info < (3, 0) and sys.platform == 'win32': def compat_getpass(prompt, *args, **kwargs): if isinstance(prompt, compat_str): from .utils import preferredencoding prompt = prompt.encode(preferredencoding()) return getpass.getpass(prompt, *args, **kwargs) else: compat_getpass = getpass.getpass try: compat_input = raw_input except NameError: # Python 3 compat_input = input # Python < 2.6.5 require kwargs to be bytes try: def _testfunc(x): pass _testfunc(**{'x': 0}) except TypeError: def compat_kwargs(kwargs): return dict((bytes(k), v) for k, v in kwargs.items()) else: compat_kwargs = lambda kwargs: kwargs if sys.version_info < (2, 7): def compat_socket_create_connection(address, timeout, source_address=None): host, port = address err = None for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) return sock except socket.error as _: err = _ if sock is not None: sock.close() if err is not None: raise err else: raise socket.error('getaddrinfo returns an 
empty list') else: compat_socket_create_connection = socket.create_connection # Fix https://github.com/rg3/youtube-dl/issues/4223 # See http://bugs.python.org/issue9161 for what is broken def workaround_optparse_bug9161(): op = optparse.OptionParser() og = optparse.OptionGroup(op, 'foo') try: og.add_option('-t') except TypeError: real_add_option = optparse.OptionGroup.add_option def _compat_add_option(self, *args, **kwargs): enc = lambda v: ( v.encode('ascii', 'replace') if isinstance(v, compat_str) else v) bargs = [enc(a) for a in args] bkwargs = dict( (k, enc(v)) for k, v in kwargs.items()) return real_add_option(self, *bargs, **bkwargs) optparse.OptionGroup.add_option = _compat_add_option if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3 compat_get_terminal_size = shutil.get_terminal_size else: _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines']) def compat_get_terminal_size(fallback=(80, 24)): columns = compat_getenv('COLUMNS') if columns: columns = int(columns) else: columns = None lines = compat_getenv('LINES') if lines: lines = int(lines) else: lines = None if columns is None or lines is None or columns <= 0 or lines <= 0: try: sp = subprocess.Popen( ['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sp.communicate() _lines, _columns = map(int, out.split()) except Exception: _columns, _lines = _terminal_size(*fallback) if columns is None or columns <= 0: columns = _columns if lines is None or lines <= 0: lines = _lines return _terminal_size(columns, lines) try: itertools.count(start=0, step=1) compat_itertools_count = itertools.count except TypeError: # Python 2.6 def compat_itertools_count(start=0, step=1): n = start while True: yield n n += step if sys.version_info >= (3, 0): from tokenize import tokenize as compat_tokenize_tokenize else: from tokenize import generate_tokens as compat_tokenize_tokenize try: struct.pack('!I', 0) except TypeError: # In Python 2.6 and 2.7.x < 2.7.7, struct requires a 
bytes argument # See https://bugs.python.org/issue19099 def compat_struct_pack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.pack(spec, *args) def compat_struct_unpack(spec, *args): if isinstance(spec, compat_str): spec = spec.encode('ascii') return struct.unpack(spec, *args) else: compat_struct_pack = struct.pack compat_struct_unpack = struct.unpack __all__ = [ 'compat_HTMLParser', 'compat_HTTPError', 'compat_basestring', 'compat_chr', 'compat_cookiejar', 'compat_cookies', 'compat_etree_fromstring', 'compat_expanduser', 'compat_get_terminal_size', 'compat_getenv', 'compat_getpass', 'compat_html_entities', 'compat_html_entities_html5', 'compat_http_client', 'compat_http_server', 'compat_input', 'compat_itertools_count', 'compat_kwargs', 'compat_ord', 'compat_os_name', 'compat_parse_qs', 'compat_print', 'compat_setenv', 'compat_shlex_quote', 'compat_shlex_split', 'compat_socket_create_connection', 'compat_str', 'compat_struct_pack', 'compat_struct_unpack', 'compat_subprocess_get_DEVNULL', 'compat_tokenize_tokenize', 'compat_urllib_error', 'compat_urllib_parse', 'compat_urllib_parse_unquote', 'compat_urllib_parse_unquote_plus', 'compat_urllib_parse_unquote_to_bytes', 'compat_urllib_parse_urlencode', 'compat_urllib_parse_urlparse', 'compat_urllib_request', 'compat_urllib_request_DataHandler', 'compat_urllib_response', 'compat_urlparse', 'compat_urlretrieve', 'compat_xml_parse_error', 'compat_xpath', 'workaround_optparse_bug9161', ]
mxamin/youtube-dl
youtube_dl/compat.py
Python
unlicense
88,570
[ "Bowtie" ]
dccf01642ab568b1d3af34b6657bafdea53e385df19a8ff22a85ee0cc8b7aad1
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """Setup.py for pymatgen.""" import sys import platform import os from setuptools import setup, find_packages, Extension from setuptools.command.build_ext import build_ext as _build_ext class build_ext(_build_ext): """Extension builder that checks for numpy before install.""" def finalize_options(self): """Override finalize_options.""" _build_ext.finalize_options(self) # Prevent numpy from thinking it is still in its setup process: import builtins if hasattr(builtins, "__NUMPY_SETUP__"): # pylint: disable=E1101 del builtins.__NUMPY_SETUP__ import importlib import numpy importlib.reload(numpy) self.include_dirs.append(numpy.get_include()) extra_link_args = [] if sys.platform.startswith("win") and platform.machine().endswith("64"): extra_link_args.append("-Wl,--allow-multiple-definition") # thanks https://stackoverflow.com/a/36693250 def package_files(directory, extensions): """ Walk package directory to make sure we include all relevant files in package. """ paths = [] for (path, directories, filenames) in os.walk(directory): for filename in filenames: if any([filename.endswith(ext) for ext in extensions]): paths.append(os.path.join("..", path, filename)) return paths json_yaml_csv_files = package_files("pymatgen", ["yaml", "json", "csv", "yaml.gz", "json.gz", "csv.gz"]) long_desc = """ Official docs: [http://pymatgen.org](http://pymatgen.org/) Pymatgen (Python Materials Genomics) is a robust, open-source Python library for materials analysis. These are some of the main features: 1. Highly flexible classes for the representation of Element, Site, Molecule, Structure objects. 2. Extensive input/output support, including support for [VASP](http://cms.mpi.univie.ac.at/vasp/), [ABINIT](http://www.abinit.org/), CIF, Gaussian, XYZ, and many other file formats. 3. 
Powerful analysis tools, including generation of phase diagrams, Pourbaix diagrams, diffusion analyses, reactions, etc. 4. Electronic structure analyses, such as density of states and band structure. 5. Integration with the Materials Project REST API. Pymatgen is free to use. However, we also welcome your help to improve this library by making your own contributions. These contributions can be in the form of additional tools or modules you develop, or feature requests and bug reports. Please report any bugs and issues at pymatgen's [Github page] (https://github.com/materialsproject/pymatgen). For help with any pymatgen issues, please use the [Discourse page](https://discuss.matsci.org/c/pymatgen). Why use pymatgen? ================= There are many materials analysis codes out there, both commerical and free, but pymatgen offer several advantages: 1. **It is (fairly) robust.** Pymatgen is used by thousands of researchers, and is the analysis code powering the [Materials Project](https://www.materialsproject.org). The analysis it produces survives rigorous scrutiny every single day. Bugs tend to be found and corrected quickly. Pymatgen also uses [CircleCI](https://circleci.com) and [Appveyor](https://www.appveyor.com/) for continuous integration on the Linux and Windows platforms, respectively, which ensures that every commit passes a comprehensive suite of unittests. 2. **It is well documented.** A fairly comprehensive documentation has been written to help you get to grips with it quickly. 3. **It is open.** You are free to use and contribute to pymatgen. It also means that pymatgen is continuously being improved. We will attribute any code you contribute to any publication you specify. Contributing to pymatgen means your research becomes more visible, which translates to greater impact. 4. **It is fast.** Many of the core numerical methods in pymatgen have been optimized by vectorizing in numpy/scipy. 
This means that coordinate manipulations are extremely fast and are in fact comparable to codes written in other languages. Pymatgen also comes with a complete system for handling periodic boundary conditions. 5. **It will be around.** Pymatgen is not a pet research project. It is used in the well-established Materials Project. It is also actively being developed and maintained by the [Materials Virtual Lab](https://www.materialsvirtuallab.org), the ABINIT group and many other research groups. With effect from version 2019.1.1, pymatgen only supports Python 3.x. Users who require Python 2.7 should install pymatgen v2018.x. """ setup( name="pymatgen", packages=find_packages(), version="2021.1.28", cmdclass={"build_ext": build_ext}, python_requires=">=3.6", install_requires=[ "numpy>=1.18.0", "requests", "ruamel.yaml>=0.15.6", "monty>=3.0.2", "scipy>=1.5.0", "tabulate", "spglib>=1.9.9.44", "networkx>=2.2", "matplotlib>=1.5", "palettable>=3.1.1", "sympy", "pandas", "plotly>=4.5.0", "uncertainties>=3.1.4", ], extras_require={ "provenance": ["pybtex"], "ase": ["ase>=3.3"], "vis": ["vtk>=6.0.0"], "abinit": ["netcdf4"], ':python_version < "3.7"': [ "dataclasses>=0.6", ], }, package_data={ "pymatgen": json_yaml_csv_files, "pymatgen.core": ["py.typed"], "pymatgen.analysis.chemenv.coordination_environments.coordination_geometries_files": ["*.txt"], "pymatgen.symmetry": ["*.sqlite"], "pymatgen.command_line": ["OxideTersoffPotentials"], }, author="Pymatgen Development Team", author_email="ongsp@eng.ucsd.edu", maintainer="Shyue Ping Ong, Matthew Horton", maintainer_email="ongsp@eng.ucsd.edu, mkhorton@lbl.gov", url="https://www.pymatgen.org", license="MIT", description="Python Materials Genomics is a robust materials " "analysis code that defines core object representations for " "structures and molecules with support for many electronic " "structure codes. 
It is currently the core analysis code " "powering the Materials Project " "(https://www.materialsproject.org).", long_description=long_desc, long_description_content_type="text/markdown", keywords=[ "VASP", "gaussian", "ABINIT", "nwchem", "qchem", "materials", "science", "project", "electronic", "structure", "analysis", "phase", "diagrams", "crystal", ], classifiers=[ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Topic :: Scientific/Engineering :: Information Analysis", "Topic :: Scientific/Engineering :: Physics", "Topic :: Scientific/Engineering :: Chemistry", "Topic :: Software Development :: Libraries :: Python Modules", ], ext_modules=[ Extension( "pymatgen.optimization.linear_assignment", ["pymatgen/optimization/linear_assignment.c"], extra_link_args=extra_link_args, ), Extension("pymatgen.util.coord_cython", ["pymatgen/util/coord_cython.c"], extra_link_args=extra_link_args), Extension( "pymatgen.optimization.neighbors", ["pymatgen/optimization/neighbors.c"], extra_link_args=extra_link_args ), ], entry_points={ "console_scripts": [ "pmg = pymatgen.cli.pmg:main", "feff_plot_cross_section = pymatgen.cli.feff_plot_cross_section:main", "feff_plot_dos = pymatgen.cli.feff_plot_dos:main", "gaussian_analyzer = pymatgen.cli.gaussian_analyzer:main", "get_environment = pymatgen.cli.get_environment:main", ] }, )
davidwaroquiers/pymatgen
setup.py
Python
mit
8,335
[ "ABINIT", "ASE", "CRYSTAL", "Gaussian", "NWChem", "VASP", "VTK", "pymatgen" ]
079a9a073cb95765652a0f48a2d74076aa59984fbecc864b998f178392cf5650
""" GraphData encapsulates input data for the DIRAC Graphs plots The DIRAC Graphs package is derived from the GraphTool plotting package of the CMS/Phedex Project by ... <to be added> """ from __future__ import print_function __RCSID__ = "$Id$" import time import datetime import numpy from matplotlib.dates import date2num from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, pretty_float DEBUG = 0 def get_key_type(keys): """ A utility function to guess the type of the plot keys """ min_time_stamp = 1000000000 max_time_stamp = 1900000000 time_type = True num_type = True string_type = True key_type = 'unknown' for key in keys: if time_type: try: time_data = to_timestamp(key) if time_data < min_time_stamp or time_data > max_time_stamp: time_type = False except ValueError: time_type = False if num_type: try: num_data = float(key) except BaseException: num_type = False if not isinstance(key, basestring): string_type = False # Take the most restrictive type if string_type: key_type = "string" if num_type: key_type = "numeric" if time_type: key_type = "time" return key_type class GraphData: def __init__(self, data={}): self.truncated = 0 self.all_keys = [] self.labels = [] self.label_values = [] self.subplots = {} self.plotdata = None self.data = dict(data) self.key_type = 'string' self.initialize() def isEmpty(self): """ Check if there is no data inserted """ return not self.plotdata and not self.subplots def setData(self, data): """ Add data to the GraphData object """ self.data = dict(data) self.initialize() def initialize(self, key_type=None): keys = self.data.keys() if not keys: print("GraphData Error: empty data") start = time.time() if isinstance(self.data[keys[0]], dict): for key in self.data: self.subplots[key] = PlotData(self.data[key], key_type=key_type) else: self.plotdata = PlotData(self.data, key_type=key_type) if DEBUG: print("Time: plot data", time.time() - start, len(self.subplots)) if self.plotdata: self.all_keys = self.plotdata.getKeys() 
else: tmpset = set() for sub in self.subplots.values(): for key in sub.getKeys(): tmpset.add(key) self.all_keys = list(tmpset) if key_type: self.key_type = key_type else: self.key_type = get_key_type(self.all_keys) self.sortKeys() self.makeNumKeys() self.sortLabels() def expandKeys(self): if not self.plotdata: for sub in self.subplots: self.subplots[sub].expandKeys(self.all_keys) def isSimplePlot(self): return self.plotdata is not None def sortLabels(self, sort_type='max_value', reverse_order=False): """ Sort labels with a specified method: alpha - alphabetic order max_value - by max value of the subplot sum - by the sum of values of the subplot last_value - by the last value in the subplot avg_nozeros - by an average that excludes all zero values """ if self.plotdata: if self.key_type == "string": if sort_type in ['max_value', 'sum']: self.labels = self.plotdata.sortKeys('weight') else: self.labels = self.plotdata.sortKeys() if reverse_order: self.labels.reverse() self.label_values = [self.plotdata.parsed_data[l] for l in self.labels] else: if sort_type == 'max_value': pairs = zip(self.subplots.keys(), self.subplots.values()) reverse = not reverse_order pairs.sort(key=lambda x: x[1].max_value, reverse=reverse) self.labels = [x[0] for x in pairs] self.label_values = [x[1].max_value for x in pairs] elif sort_type == 'last_value': pairs = zip(self.subplots.keys(), self.subplots.values()) reverse = not reverse_order pairs.sort(key=lambda x: x[1].last_value, reverse=reverse) self.labels = [x[0] for x in pairs] self.label_values = [x[1].last_value for x in pairs] elif sort_type == 'sum': pairs = [] for key in self.subplots: pairs.append((key, self.subplots[key].sum_value)) reverse = not reverse_order pairs.sort(key=lambda x: x[1], reverse=reverse) self.labels = [x[0] for x in pairs] self.label_values = [x[1] for x in pairs] elif sort_type == 'alpha': self.labels = self.subplots.keys() self.labels.sort() if reverse_order: self.labels.reverse() self.label_values = 
[self.subplots[x].sum_value for x in self.labels] elif sort_type == 'avg_nozeros': pairs = zip(self.subplots.keys(), self.subplots.values()) reverse = not reverse_order pairs.sort(key=lambda x: x[1].avg_nozeros, reverse=reverse) self.labels = [x[0] for x in pairs] self.label_values = [x[1].avg_nozeros for x in pairs] else: self.labels = self.subplots.keys() if reverse_order: self.labels.reverse() def sortKeys(self): """ Sort the graph keys in a natural order """ if self.plotdata: self.plotdata.sortKeys() self.all_keys = self.plotdata.getKeys() else: self.all_keys.sort() self.min_key = min(self.all_keys) self.max_key = max(self.all_keys) def makeNumKeys(self): """ Make numerical representation of the graph keys suitable for plotting """ self.all_num_keys = [] if self.key_type == "string": self.all_string_map = {} next = 0 for key in self.all_keys: self.all_string_map[key] = next self.all_num_keys.append(next) next += 1 elif self.key_type == "time": self.all_num_keys = [date2num(datetime.datetime.fromtimestamp(to_timestamp(key))) for key in self.all_keys] elif self.key_type == "numeric": self.all_num_keys = [float(key) for key in self.all_keys] self.min_num_key = min(self.all_num_keys) self.max_num_key = max(self.all_num_keys) def makeCumulativeGraph(self): """ Prepare data for the cumulative graph """ self.expandKeys() if self.plotdata: self.plotdata.makeCumulativePlot() if self.truncated: self.otherPlot.makeCumulativePlot() if self.subplots: for label in self.subplots: self.subplots[label].makeCumulativePlot() self.sortLabels(sort_type='last_value') def getLabels(self): """ Get the graph labels together with the numeric values used for the label sorting """ labels = [] if self.plotdata: if self.key_type != 'string': labels = [('NoLabels', 0.)] else: labels = zip(self.labels, self.label_values) elif self.truncated: tlabels = self.labels[:self.truncated] tvalues = self.label_values[:self.truncated] labels = zip(tlabels, tvalues) labels.append(('Others', 
sum(self.label_values[self.truncated:]))) else: labels = zip(self.labels, self.label_values) return labels def getStringMap(self): """ Get string to number mapping for numeric type keys """ return self.all_string_map def getNumberOfKeys(self): return len(self.all_keys) def getNumberOfLabels(self): if self.truncated: return self.truncated + 1 else: return len(self.labels) def getPlotNumData(self, label=None, zipFlag=True): """ Get the plot data in a numeric form """ if self.plotdata: if zipFlag: return zip(self.plotdata.getNumKeys(), self.plotdata.getValues(), self.plotdata.getPlotErrors()) else: return self.plotdata.getValues() elif label is not None: if label == "Others": return self.otherPlot.getPlotDataForNumKeys(self.all_num_keys) else: return self.subplots[label].getPlotDataForNumKeys(self.all_num_keys) else: # Get the sum of all the subplots self.expandKeys() arrays = [] for label in self.subplots: arrays.append(numpy.array([x[1] for x in self.subplots[label].getPlotDataForNumKeys(self.all_num_keys, True)])) sum_array = sum(arrays) if zipFlag: return zip(self.all_num_keys, list(sum_array)) else: return sum_array def truncateLabels(self, limit=10): """ Truncate the number of labels to the limit, leave the most important ones, accumulate the rest in the 'Others' label """ if self.plotdata: return nLabels = len(self.labels) if nLabels <= limit: return self.truncated = limit new_labels = self.labels[:limit] new_labels.append('Others') other_data = {} for key in self.all_keys: other_data[key] = 0. 
for label in self.labels: if label not in new_labels: for key in self.all_keys: if key in self.subplots[label].parsed_data: other_data[key] += self.subplots[label].parsed_data[key] self.otherPlot = PlotData(other_data) def getStats(self): """ Get statistics of the graph data """ numData = self.getPlotNumData(zipFlag=False) if not len(numData): # pylint: disable=len-as-condition return 0, 0, 0, 0 numData = numpy.array(numData) min_value = numData.min() max_value = numData.max() average = float(numData.sum()) / len(numData) current = numData[-1] return min_value, max_value, average, current def getStatString(self, unit=None): """ Get a string summarizing the graph data statistics """ min_value, max_value, average, current = self.getStats() tmpList = [] unitString = '' if unit: unitString = str(unit) if max_value: try: s = "Max: " + pretty_float(max_value) + " " + unitString tmpList.append(s.strip()) except BaseException: pass if min_value: try: s = "Min: " + pretty_float(min_value) + " " + unitString tmpList.append(s.strip()) except BaseException: pass if average: try: s = "Average: " + pretty_float(average) + " " + unitString tmpList.append(s.strip()) except BaseException: pass if current: try: s = "Current: " + pretty_float(current) + " " + unitString tmpList.append(s.strip()) except BaseException: pass resultString = ', '.join(tmpList) return resultString class PlotData: """ PlotData class is a container for a one dimensional plot data """ def __init__(self, data, single=True, key_type=None): self.key_type = "unknown" keys = data.keys() if not keys: print("PlotData Error: empty data") return # Original data self.data = dict(data) # Working copy of the parsed data self.parsed_data = {} self.parsed_errors = {} # Keys and values as synchronized lists self.keys = [] self.num_keys = [] self.values = [] self.errors = [] self.sorted_keys = [] # Do initial data parsing self.parseData(key_type) if single: self.initialize() def initialize(self): if self.key_type == 
"string": self.keys = self.sortKeys('weight') else: self.keys = self.sortKeys() self.values = [self.parsed_data.get(k, 0.0) for k in self.keys] self.errors = [self.parsed_errors.get(k, 0.0) for k in self.keys] values_to_sum = [self.parsed_data.get(k, 0.0) for k in self.keys if k != ''] self.real_values = [] for k in self.keys: if self.parsed_data[k] is not None: self.real_values.append(self.parsed_data[k]) self.values_sum = float(sum(self.real_values)) # Prepare numerical representation of keys for plotting self.num_keys = [] if self.key_type == "string": self.string_map = {} next = 0 for key in self.keys: self.string_map[key] = next self.num_keys.append(next) next += 1 elif self.key_type == "time": self.num_keys = [date2num(datetime.datetime.fromtimestamp(to_timestamp(key))) for key in self.keys] elif self.key_type == "numeric": self.num_keys = [float(key) for key in self.keys] self.min_value = float(min(self.real_values)) self.max_value = float(max(self.real_values)) self.min_key = self.keys[0] self.max_key = self.keys[-1] self.sum_value = float(sum(self.real_values)) self.last_value = float(self.real_values[-1]) count = len(filter(lambda a: a != 0, self.real_values)) if count != 0: self.avg_nozeros = self.sum_value / float(count) else: self.avg_nozeros = 0 def expandKeys(self, all_keys): """ Fill zero values into the missing keys """ for k in all_keys: if k not in self.parsed_data: self.parsed_data[k] = 0. 
self.sorted_keys = [] self.keys = self.parsed_data.keys() self.initialize() def sortKeys(self, sort_type='alpha'): """ Sort keys according to the specified method : alpha - sort in alphabetic order weight - sort in the order of values """ if self.sorted_keys: return self.sorted_keys if sort_type == 'weight': pairs = zip(self.parsed_data.keys(), self.parsed_data.values()) pairs.sort(key=lambda x: x[1], reverse=True) self.sorted_keys = [x[0] for x in pairs] elif sort_type == 'alpha': self.sorted_keys = self.keys self.sorted_keys.sort() else: print("Unknown sorting type:", sort_type) return self.sorted_keys def __data_size(self, item): """ Determine a numerical size for the data; this is used to sort the keys of the graph. If the item is a tuple, take the absolute value of the first entry. Otherwise, attempt to take the absolute value of that item. If that fails, just return -1. """ if isinstance(item, tuple): return abs(item[0]) try: return abs(item) except TypeError: return - 1 def parseKey(self, key): """ Parse the name of the pivot; this is the identity function. """ if self.key_type == "time": return to_timestamp(key) else: return key def parseDatum(self, data): """ Parse the specific data value; this is the identity. """ if isinstance(data, basestring) and "::" in data: datum, error = data.split("::") elif isinstance(data, tuple): datum, error = data else: error = 0. datum = data try: resultD = float(datum) except BaseException: resultD = None try: resultE = float(error) except BaseException: resultE = None return (resultD, resultE) def parseData(self, key_type=None): """ Parse all the data values passed to the graph. For this super class, basically does nothing except loop through all the data. A sub-class should override the parseDatum and parse_pivot functions rather than this one. 
""" if key_type: self.key_type = key_type else: self.key_type = get_key_type(self.data.keys()) new_parsed_data = {} new_passed_errors = {} for key, data in self.data.items(): new_key = self.parseKey(key) data, error = self.parseDatum(data) # if data != None: new_parsed_data[new_key] = data new_passed_errors[new_key] = error self.parsed_data = new_parsed_data self.parsed_errors = new_passed_errors self.keys = self.parsed_data.keys() def makeCumulativePlot(self): if not self.sorted_keys: self.sortKeys() cum_values = [] if self.values[0] is None: cum_values.append(0.) else: cum_values.append(self.values[0]) for i in range(1, len(self.values)): if self.values[i] is None: cum_values.append(cum_values[i - 1]) else: cum_values.append(cum_values[i - 1] + self.values[i]) self.values = cum_values self.last_value = float(self.values[-1]) def getPlotData(self): return self.parsed_data def getPlotErrors(self): return self.parsed_errors def getPlotNumData(self): return zip(self.num_keys, self.values, self.errors) def getPlotDataForKeys(self, keys): result_pairs = [] for key in keys: if key in self.parsed_data: result_pairs.append(key, self.parsed_data[key], self.parsed_errors[key]) else: result_pairs.append(key, None, 0.) return result_pairs def getPlotDataForNumKeys(self, num_keys, zeroes=False): result_pairs = [] for num_key in num_keys: try: ind = self.num_keys.index(num_key) if self.values[ind] is None and zeroes: result_pairs.append((self.num_keys[ind], 0., 0.)) else: result_pairs.append((self.num_keys[ind], self.values[ind], self.errors[ind])) except ValueError: if zeroes: result_pairs.append((num_key, 0., 0.)) else: result_pairs.append((num_key, None, 0.)) return result_pairs def getKeys(self): return self.keys def getNumKeys(self): return self.num_keys def getValues(self): return self.values def getMaxValue(self): return max(self.values) def getMinValue(self): return min(self.values)
chaen/DIRAC
Core/Utilities/Graphs/GraphData.py
Python
gpl-3.0
17,366
[ "DIRAC" ]
a137ced7e84a91daedc2ebedf0b82790d8d3741ea0f06f81c090ecd70e0f744e
import cv2 import numpy as np from matplotlib import pyplot as plt import matplotlib.patches as mpatches from skimage.morphology import skeletonize, thin, medial_axis, closing, square from skimage.util import invert from skimage.color import rgb2gray, label2rgb from skimage import data,img_as_ubyte from skimage.filters import threshold_otsu from skimage.segmentation import clear_border from skimage.measure import label, regionprops from PIL import Image InputFolder='RawInput/' OutputFolder='Output/' ImageList= ["459591", "406786" ,"423690", "410200"] kernel = np.ones((5,5),np.uint8) kernel_size=str(kernel.shape) def main(): global img global image, Lable_Img,label_thresholding, Org_Lable_Img, Org_Sample_Img for image in ImageList: print image img = cv2.imread(InputFolder + image + ".jpg" , 0) Org_Lable_Img= Image.open(InputFolder + image + ".svs_labels.bmp") Org_Sample_Img = Image.open(InputFolder + image + ".jpg") Lable_Img = cv2.imread(InputFolder + image + ".svs_labels.bmp" , 0) Thresholding() #Plot_OtsuThresholding() Closing() #Invert() #Plot_Closing("_KernelSize_" + kernel_size) Invert() Thining() Label_Thresholding() #Plot_BinaryThresholding() BoundingBox(5000) #20000 SOME do not work #15000 all work for 4. 
If the image is total vetical, the average size is around 8000, here I take 3000 to keep the small pieces as well as to prevent too small pieces (1000) #np.savetxt('test.txt', thinned3) #Plot_Thinning("_Invert") # apply threshold #plt.figure(figsize=(width/DPI,height/DPI)) def Thresholding(): global th1, th2, th3, th4,blur,blur2 # global thresholding ret1,th1 = cv2.threshold(img,200,255,cv2.THRESH_BINARY) # Otsu's thresholding ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_OTSU) # Otsu's thresholding after Gaussian filtering blur = cv2.GaussianBlur(img,(75,75),0) #(75,75) blur2 = cv2.GaussianBlur(img,(25,25),0) #(25,25) ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_OTSU) ret4,th4 = cv2.threshold(blur2,0,255,cv2.THRESH_OTSU) def Label_Thresholding(): global label_thresholding, Lable_Img label_thresholding=rgb2gray(Lable_Img) label_thresholding = invert(label_thresholding) ret10,label_thresholding = cv2.threshold(label_thresholding,10,255,cv2.THRESH_BINARY) #Important label_thresholding=np.where(label_thresholding>np.mean(label_thresholding),1,0) # ret10,label_thresholding = cv2.threshold(Lable_Img,127,255,cv2.THRESH_BINARY) def Closing(): global closing1, closing2, closing3 closing1 = cv2.morphologyEx(th2, cv2.MORPH_CLOSE, kernel) closing2 = cv2.morphologyEx(th4, cv2.MORPH_CLOSE, kernel) closing3 = cv2.morphologyEx(th3, cv2.MORPH_CLOSE, kernel) def Invert(): global closing1, closing2, closing3 closing1 = invert(closing1) closing2 = invert(closing2) closing3 = invert(closing3) def Thining(): global closing1, closing2, closing3, skeleton1, skeleton2, skeleton3, thinned1,thinned2,thinned3 closing1=rgb2gray(closing1) closing1=np.where(closing1>np.mean(closing1),1,0) skeleton1 = skeletonize(closing1) closing2=rgb2gray(closing2) closing2=np.where(closing2>np.mean(closing2),1,0) skeleton2 = skeletonize(closing2) closing3=rgb2gray(closing3) closing3=np.where(closing3>np.mean(closing3),1,0) skeleton3 = skeletonize(closing3) thinned1= thin(closing1) thinned2= thin(closing2) 
thinned3= thin(closing3) #thinned3 = img_as_ubyte(thinned3) def BoundingBox(ReginThreshold): global label_thresholding, closing3,thinned3,Org_Lable_Img, img, Org_Sample_Img Closing3=closing3 #thinned3 thresh = threshold_otsu(Closing3) bw = closing(Closing3 > thresh, square(3)) # remove artifacts connected to image border cleared = clear_border(bw) thinned3=np.where(thinned3>np.mean(thinned3),1,0) # label image regions label_image = label(cleared) image_label_overlay = label2rgb(label_image, image=Closing3) #fig, ax = plt.subplots(figsize=(10, 6)) #ax.imshow(image_label_overlay,cmap=plt.cm.gray) for region in regionprops(label_image): # take regions with large enough areas if region.area >= ReginThreshold: #600 # draw rectangle around segmented coins minr, minc, maxr, maxc = region.bbox rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='red', linewidth=2) print minc, minr, maxc, maxr #ax.add_patch(rect) #ax.set_axis_off() #plt.tight_layout() #plt.show() #plt.savefig(OutputFolder + "BoundingBox_" + Image+ "_RegionThreshold_" +str(ReginThreshold),dpi=300) number=1 Org_Lable_Img=img_as_ubyte(Org_Lable_Img) Org_Sample_Img=img_as_ubyte(Org_Sample_Img) for region in regionprops(label_image): if region.area >= ReginThreshold: #600 # draw rectangle around segmented coins minr, minc, maxr, maxc = region.bbox Box_Sample_OrgImg = Org_Sample_Img[minr:maxr, minc:maxc] Box_Label_OrgImg = Org_Lable_Img[minr:maxr, minc:maxc] Box_Sample = thinned3[minr:maxr, minc:maxc] Box_Label = label_thresholding[minr:maxr, minc:maxc] Box_Label = img_as_ubyte(Box_Label) Box_Sample = img_as_ubyte(Box_Sample) Box_Mixed= cv2.bitwise_or(Box_Sample, Box_Label) #Box_OrgMixed=cv2.add(Box_Sample_OrgImg, Box_Label_OrgImg) #print Box_Sample_OrgImg, Box_Label_OrgImg width, length, height= Box_Label_OrgImg.shape #Box_Sample_OrgImg=invert(Box_Sample_OrgImg) #Box_Label_OrgImg=(Box_Label_OrgImg) #print width, length, height Box_OrgMixed=np.zeros(shape=(width, length, 
height)) Sample_Mixed=np.zeros(shape=(width, length, height)) for l in range(length): for w in range(width): if Box_Label_OrgImg[w,l,0]>=255 & Box_Label_OrgImg[w,l,1]>=255 & Box_Label_OrgImg[w,l,2]>=255: Box_OrgMixed[w,l]=(Box_Sample_OrgImg[w,l]) else: Box_OrgMixed[w,l]=(Box_Label_OrgImg[w,l]) if Box_Sample[w,l]==0: Sample_Mixed[w,l]=(Box_Sample_OrgImg[w,l]) else: Sample_Mixed[w,l]=(0,0,0) width, length= Box_Label.shape LineLength=0 #if Box_Label[w,l+1] #print w,l,Box_Label_OrgImg[w,l,0],Box_Label_OrgImg[w,l,1],Box_Label_OrgImg[w,l,2] #Box_OrgMixed = (Box_Sample_OrgImg+ Box_Label_OrgImg) Box_OrgMixed=Box_OrgMixed.astype(np.uint8) Sample_Mixed=Sample_Mixed.astype(np.uint8) fig, ax = plt.subplots(2,3,figsize=(20, 12)) ax[0,0].imshow(Box_Sample_OrgImg, cmap=plt.cm.gray) ax[0,0].set_axis_off() ax[0,1].imshow(Box_Label_OrgImg, cmap=plt.cm.gray) ax[0,1].set_axis_off() ax[0,2].imshow((Box_OrgMixed)) ax[0,2].set_axis_off() ax[1,0].imshow(Box_Sample, cmap=plt.cm.gray) ax[1,0].set_axis_off() ax[1,1].imshow(Box_Label, cmap=plt.cm.gray) ax[1,1].set_axis_off() ax[1,2].imshow(Box_Mixed, cmap=plt.cm.gray) ax[1,2].set_axis_off() #ax[2,0].imshow(Sample_Mixed) plt.savefig(OutputFolder+"MergedBox_Thin_Label"+image+"_"+str(number),dpi=300) plt.tight_layout() #plt.show() fig, ax = plt.subplots(1,1,figsize=(10, 6)) ax.imshow(Sample_Mixed) #plt.show() #plt.savefig(OutputFolder+"MergedBox_OrgSample_Thin_"+image+"_"+str(number),dpi=300) #plt.imsave(OutputFolder+"Segmentated_Thinned"+image+"_"+str(number) +".png",Box_Sample,cmap=plt.cm.gray) #plt.imsave(OutputFolder+"Segmentated_Thinned"+image+"_"+str(number) +"_Label.png",Box_Label,cmap=plt.cm.gray) plt.imsave(OutputFolder+"Segmentated_Mixed_Thinned"+image+"_"+str(number) +".png",Box_Mixed,cmap=plt.cm.gray) number+=1 #def calculating(): # global Box_Label # width, length= Box_Label.shape # length=0 # for w in range(width): # for l in range(length): # if Box_Label[w,l]=1: # if 
Box_Label[w+1,l]+Box_Label[w+1,l+1]+Box_Label[w,l+1]+Box_Label[w-1,l]+Box_Label[w+1,l] def Plot_OtsuThresholding(): global blur2 images = [img, 0, th1, img, 0, th2, blur2,0,th4, blur, 0, th3] titles = ['Original Noisy Image','Histogram','Global Thresholding (v=127)', 'Original Noisy Image','Histogram',"Otsu's Thresholding", 'Gaussian filtered Image (25,25)','Histogram',"Otsu's Thresholding", 'Gaussian filtered Image (75,75)','Histogram',"Otsu's Thresholding"] for i in xrange(4): plt.subplot(4,3,i*3+1), plt.imshow(images[i*3],'gray') plt.title(titles[i*3]), plt.xticks([]), plt.yticks([]) plt.subplot(4,3,i*3+2),plt.hist(images[i*3].ravel(),256) plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([]) plt.subplot(4,3,i*3+3),plt.imshow(images[i*3+2],'gray') plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([]) plt.show() #plt.savefig(OutputFolder + "Thredsholding_" + image,dpi=300) def Plot_BinaryThresholding(): plt.subplot(2,1,1), plt.imshow(Org_Lable_Img) plt.subplot(2,1,2), plt.imshow(label_thresholding,'gray') #plt.show() plt.savefig(OutputFolder + "Label_" + image,dpi=300) #np.savetxt('markers.txt', label_thresholding) def Plot_Closing(suffix): fig=plt.figure(figsize=(20,12)) plt.subplot(3,2,1),plt.imshow(th2,'gray') plt.title("Otsu's Thresholding"), plt.xticks([]), plt.yticks([]) plt.subplot(3,2,2),plt.imshow(closing1,'gray') plt.title("Closing_"+kernel_size), plt.xticks([]), plt.yticks([]) plt.subplot(3,2,3),plt.imshow(th4,'gray') plt.title("Gaussian filtered (25,25) + Otsu's Thresholding"), plt.xticks([]), plt.yticks([]) plt.subplot(3,2,4),plt.imshow(closing2,'gray') plt.title("Closing_"+kernel_size), plt.xticks([]), plt.yticks([]) plt.subplot(3,2,5),plt.imshow(th3,'gray') plt.title("Gaussian filtered (75,75) + Otsu's Thresholding"), plt.xticks([]), plt.yticks([]) plt.subplot(3,2,6),plt.imshow(closing3,'gray') plt.title("Closing_"+kernel_size), plt.xticks([]), plt.yticks([]) #plt.show() # plt.savefig(OutputFolder+"Closing_"+image+suffix,dpi=200) 
plt.imsave(OutputFolder+"Closing3_"+kernel_size +".png",closing3,cmap=plt.cm.gray) def Plot_Thinning(suffix): fig=plt.figure(figsize=(30,12)) plt.subplot(3,3,1),plt.imshow(closing1,'gray') plt.title("Otsu's Thresholding +"+"Closing_"+kernel_size), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,2),plt.imshow(thinned1,'gray') plt.title("Thining"), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,3),plt.imshow(closing2,'gray') plt.title("Gaussian filtered (25,25) + Otsu's Thresholding +" + "Closing_"+kernel_size), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,4),plt.imshow(thinned2,'gray') plt.title("Thining"), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,5),plt.imshow(closing3,'gray') plt.title("Gaussian filtered (75,75) + Otsu's Thresholding + Closing_"+kernel_size), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,6),plt.imshow(thinned3,'gray') plt.title("Thining"), plt.xticks([]), plt.yticks([]) plt.title("Gaussian filtered (75,75) + Otsu's Thresholding + Closing_"+kernel_size), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,7),plt.imshow(skeleton1,'gray') plt.title("Skeleton"), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,8),plt.imshow(skeleton2,'gray') plt.title("Skeleton"), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,9),plt.imshow(skeleton3,'gray') plt.title("Skeleton"), plt.xticks([]), plt.yticks([]) plt.savefig(OutputFolder+"Thining_"+image+suffix,dpi=300) #plt.show() fig=plt.figure(figsize=(10,6)) plt.subplot(1,1,1),plt.imshow(thinned3,'gray') plt.xticks([]), plt.yticks([]) #plt.title("Thining"), #plt.title("Gaussian filtered (75,75) + Otsu's Thresholding + Closing_"+kernel_size), plt.xticks([]), plt.yticks([]) #plt.savefig(OutputFolder+"Along_Thining_"+image+suffix,dpi=300) #plt.imsave(OutputFolder+"Along_Thinned"+kernel_size +".png",thinned3,cmap=plt.cm.gray) #plt.imsave(OutputFolder+"Along_Thinned"+kernel_size +".png",thinned3,cmap=plt.cm.gray) if __name__ == '__main__': main()
YubinXie/Computational-Pathology
thresholded.py
Python
gpl-2.0
12,038
[ "Gaussian" ]
d8b632c12e77a93844b80fd35486159da73b97b3ad0113a37536a7558b148035
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot as plt from data import get_value, draw_sky import numpy as np import tensorflow as tf from edward.models import Normal, Uniform M = 300 # number of galaxies N = 3 # number of Dark Halos # (x, y) ~ Uniform([0, 0], [4200, 4200]) galaxies_pos = Uniform( a=np.full(shape=(M, 2), fill_value=0., dtype='float32'), b=np.full(shape=(M, 2), fill_value=4200., dtype='float32')) # (X, Y) ~ Uniform([0, 0], [4200, 4200]) halos_pos = Uniform( a=np.full(shape=(N, 2), fill_value=0., dtype='float32'), b=np.full(shape=(N, 2), fill_value=4200., dtype='float32')) # mass of the dark halos halos_mass = Uniform( a=np.full(shape=(N,), fill_value=40, dtype='float32'), b=np.full(shape=(N,), fill_value=180, dtype='float32') ) # ====== calculate the distance from galaxies to halos ====== # # tricky to calculate euclidean distance between 2 matrices but # this broadcast trick would do the job. 
euclidean_distance = tf.square( tf.subtract( galaxies_pos, # shape=(M, 2) tf.expand_dims(halos_pos, axis=1) # shape=(N, 1, 2) ) # shape=(N, M, 2) ) distance_factor = tf.divide(1., euclidean_distance) # shape=(N, M, 2) # multiply with the log of mass mass_factor = tf.log(tf.reshape(halos_mass, (N, 1, 1))) # shape=(N, 1, 1) mean = tf.reduce_sum( distance_factor * mass_factor, # shape=(N, M, 2) axis=(0,) ) # shape=(M, 2) # ====== ellipticity of the galaxies ====== # # e ~ Normal(∑ 1 / distance * log(mass), sigma) # I don't know what is the value of sigma, so why not give it # an Uniform distribution, we give each galaxy a different sigma :D sigma = Uniform( a=np.full(shape=(M, 2), fill_value=0.12, dtype='float32'), b=np.full(shape=(M, 2), fill_value=0.33, dtype='float32') ) galaxies_elp = Normal( mu=mean, sigma=sigma, ) # ====== happy sampling ====== # galXY, halXY, halMAS, galE, sigma = get_value( [galaxies_pos.value(), halos_pos.value(), halos_mass.value(), galaxies_elp.value(), sigma.value()] ) print("Galaxies position:", galXY.shape) print("Galaxies ellipticity:", galE.shape) print("Halos position:", halXY.shape) print("Halos mass:", halMAS.shape) print("Sigma:", sigma.shape) # ====== visualize the generated sky ====== # plt.figure(figsize=(8, 8), dpi=180) draw_sky(galaxies=np.concatenate([galXY, galE], axis=-1), halos=[N] + halXY.ravel().tolist()) plt.show(block=True)
trungnt13/BAY2-uef17
4_generate_artificial_sky.py
Python
gpl-3.0
2,534
[ "Galaxy" ]
a60f699c7d587451ec987b6a89510b55be88c4b7645dbbd93487b70af14f34ac
# -*- coding: utf-8 -*- import cPickle, os, time, ConfigParser import numpy as np import ROOT, sys from multiprocessing import Process, Queue from scale.scale import Scale sql = True try: from temdbWO import Storage except ImportError: sql = False print 'database is unavailable' class Constants: c = 299792458.0 # speed of light [m/s] me = 0.510998910e+6 # electron rest energy [eV] h = 4.13566752e-15 # Plank constant [eV*s] hbar = 6.58211928e-16 # Plank constant reduced [eV*s] Bo = me**2/hbar/c**2 # Bo = me[eV]^2 / hbar[eV*s] / c[m/s]^2 [T] RemB = 1.e+6/me/Bo # i.e. [1 MeV] / Bo [T] / me [eV] # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- class EDGE(Constants): def __init__(self, scalefile, cfg_file, folder): self.CO_Grating = {0 : 5.426463e-6, 250: 5.426463e-6, 252: 5.426463e-6, 322: 5.604325e-6, 324: 5.604325e-6, 325: 5.617221e-6, 327: 5.617221e-6} self.exclude = [] cfg = ConfigParser.ConfigParser(); cfg.read(cfg_file) self.MinAmp = cfg.getfloat( 'edge', 'MinAmpEdge') self.Ranger = cfg.getfloat( 'edge', 'EdgeRanger') self.Merger = cfg.getint( 'edge', 'BinsMerger') self.ETuner = cfg.getfloat( 'edge', 'EveppTuner') self.BTuner = cfg.getfloat( 'edge', 'BveppTuner') self.Radius = cfg.getfloat( 'edge', 'VeppRadius') self.Asymme = cfg.getboolean('edge', 'Asymmetry') self.EdgeP2 = cfg.getboolean('edge', 'EdgePoly2') self.SaveDB = cfg.getboolean('edge', 'SaveForSND') and sql self.negate = cfg.getboolean('edge', 'NegativeCV') lwave = cfg.getfloat( 'edge', 'WaveLength') # laser wavelength [m] Constants.wo = Constants.h*Constants.c/lwave # laser photon energy [eV] Constants.Eo = 0.25e-6 * Constants.me**2 / Constants.wo # (me^2/4wo) [MeV] # for i in range(10): # b, e = 'ex_from_%1d' % (i), 'ex_upto_%1d' % (i) # if cfg.has_option('edge', b) and cfg.has_option('edge', e): # self.exclude.append([ cfg.getfloat('edge', b), cfg.getfloat('edge', 
e) ]) # else: break self.cc = ROOT.TCanvas('cc','BEMS for VEPP-2000', 800, 600, 800, 600) self.const = ROOT.TF1('const', '[0]') self.simple = ROOT.TF1('simple', EdgeSimple(), 0, 1, 7 ); self.simple.SetLineColor(ROOT.kRed) self.spreso = ROOT.TF1('spreso', HPGeSpread(), 0, 1, 3 ); self.convol = ROOT.TF1Convolution('simple', 'spreso', -1, 1); self.convol.SetNofPointsFFT(1000) self.comple = ROOT.TF1('comple', self.convol, 1.0, 2.0, 10); self.comple.SetNpx(1000) self.Legend = ROOT.TLegend(0.6, 0.6, 0.95, 0.95, '', 'brNDC'); self.HPGe = Scale(scalefile, cfg_file, 'application') self.plots = EMSResults(cfg_file, folder) # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- def __del__(self): self.cc.cd(); self.cc.Clear() # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- def Go(self, UTB, UTE, hps, filechain, grating): if 0.15<Constants.wo<0.95: if self.CO_Grating.has_key(grating): print '%3d: %8.6f um' % (grating, 1.e+6*self.CO_Grating[grating]) Constants.wo = Constants.h*Constants.c/self.CO_Grating[grating] # laser photon energy [eV] Constants.Eo = 0.25e-6 * Constants.me**2 / Constants.wo # (me^2/4wo) [MeV] else: print grating raw_input() return False R = VEPP2K_DB().GetRunInfo(filechain) if not R: R = {'E':970.5, 'dE':0.1, 'I':0.0, 'dI':0.0, 'B':0.0, 'dB':0.0} if R: print ' ╔ VEPP2K conditions: ══════╤═════════════════════════╤═══════════════════════════╗' print ' ║ E = %7.2f ± %5.2f MeV │ Ie = %5.1f ± %5.1f mA │ Bo = %7.5f ± %7.5f T ║' % (R['E'], R['dE'], R['I'], R['dI'], R['B'], R['dB']) print ' ╚══════════════════════════╧═════════════════════════╧═══════════════════════════╝\n' self.VEPP2K = R if abs(self.ETuner)<20.0: self.Eo = self.ETuner + self.VEPP2K['E'] # [MeV] else: self.Eo = self.ETuner # [MeV] if self.Eo<150.: return if self.Radius: self.Bo = 1.e+8*self.Eo/Constants.c/self.Radius # [T] elif not self.VEPP2K.has_key('B'): self.Bo = 1.e+8*self.Eo/Constants.c/140.0 # [T] elif 
self.VEPP2K['B']<0.1: self.Bo = 1.e+8*self.Eo/Constants.c/140.0 # [T] elif abs(self.BTuner)<0.1: self.Bo = self.VEPP2K['B'] + self.BTuner # [T] else: self.Bo = self.BTuner # [T] else: self.Eo = 0.0; return k = 4.e+6*self.Eo*Constants.wo/Constants.me**2; # [eV*eV / eV**2] Wmax = 1.e+3*self.Eo*k/(1.+k) # [keV] print "Fit initial beam energy: %7.2f MeV " % self.Eo zero, gain = self.GetCalibrationResults(0.5*(UTB+UTE), Wmax) if gain: nbins = hps.GetNbinsX(); hps.SetBins(nbins, zero, zero + gain * nbins) self.hps = hps.Clone(); self.hps.Rebin(self.Merger); self.hps.GetXaxis().SetTitle('E_{#gamma} [keV]') # get rid of spikes: # self.EP.append(565) for spike in self.EP: lo = 1 + int((spike - zero - 4 * self.RL)/(gain*float(self.Merger))) hi = 1 + int((spike - zero + 5 * self.RR)/(gain*float(self.Merger))) for ch in range(lo, hi): self.hps.SetBinContent(ch, 0.0); self.hps.SetBinError(ch, 0.0) Wmax = self.fitEdgeSimple(Wmax, self.Ranger) self.NicePicture() if Wmax: Results = self.fitEdgeComple(Wmax, self.Ranger) self.NicePicture() self.comple.Draw('SAME'); self.Legend.Draw('SAME'); self.cc.Modified(); self.cc.Update() #self.hps.Draw('SAME'); self.cc.Modified(); self.cc.Update() if Results: self.plots.AddPoint( UTB, UTE, Results) if self.SaveDB: Save_for_SND(UTB, UTE, Results) Results['T'] = [0.5*(UTB + UTE), 0.5*(UTE - UTB)] return Results else: print 'No Edge?' 
self.cc.cd(); self.cc.Clear(); self.cc.SetGrid() self.hps.Draw(''); self.cc.Modified(); self.cc.Update() return False else: return False # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- def NicePicture(self): if self.negate: self.cc.SetFillColor(923); self.cc.SetFrameFillColor(1); self.cc.SetFrameLineColor(0) self.hps.SetMarkerColor(ROOT.kCyan); self.hps.SetLineColor(ROOT.kCyan) self.hps.GetXaxis().SetAxisColor(0); self.hps.GetXaxis().SetTitleColor(0); self.hps.GetXaxis().SetLabelColor(0) self.hps.GetYaxis().SetAxisColor(0); self.hps.GetYaxis().SetTitleColor(0); self.hps.GetYaxis().SetLabelColor(0) self.Legend.SetFillColor(923); self.Legend.SetLineColor(0); self.Legend.SetTextColor(0) self.simple.SetLineColor(ROOT.kOrange) self.comple.SetLineColor(ROOT.kPink) self.cc.cd(); self.cc.Clear(); self.cc.SetGrid(); self.hps.SetMarkerStyle(24) self.hps.Draw(''); self.simple.Draw('SAME'); self.cc.Modified(); self.cc.Update() if self.negate: self.cc.FindObject('title').SetFillColor(923) self.cc.FindObject('title').SetTextColor(0) self.cc.Modified(); self.cc.Update() # self.cc.ls() # raw_input() # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- def fitEdgeSimple(self,W,LBK): K = 2.e+3*W/Constants.me; Wmin = W/(1+K); E1, E2 = W - LBK*Wmin, W + LBK*Wmin self.hps.GetXaxis().SetRangeUser(E1-10, E2+10) self.const.SetRange(1.02*W, E2); self.hps.Fit('const','QRN'); B = self.const.GetParameter(0) self.const.SetRange(E1, 0.98*W); self.hps.Fit('const','QRN'); A = self.const.GetParameter(0) - B # Eb [MeV] | B [T] | Amplitude | Edge linear | Edge square | Background | Backg. 
Slope | params = np.fromiter([self.Eo, self.Bo, A, 0.0, 0.0, B, 0.0], np.float) self.simple.SetParameters(params); self.simple.SetRange(E1, E2); self.simple.SetNpx(1000) self.simple.SetParLimits(2, 0.0, 1.e+6) if self.Radius: self.simple.FixParameter(1, self.Bo) if self.EdgeP2: self.simple.SetParLimits(4, 0.0, 0.01) else: self.simple.FixParameter(4, 0.0) R = self.hps.Fit('simple','RSQN'); OK = False if self.Radius: self.simple.ReleaseParameter(1) if not self.EdgeP2: self.simple.ReleaseParameter(4) if not R.Status(): E = fitParameters(self.simple) tilt = (1e+3*E['p'][3], 1e+3*E['e'][3], 1e+6*E['p'][4], 1e+6*E['e'][4]) print ' ╔ Simple Edge Fit: ═════════════╤══════════════════════╤═════════════════════════╗' print ' ║ Range from %5.0f to %5.0f keV │ E_beam = %7.2f MeV │ W_max = %9.3f keV ║' % (E1, E2, self.Eo, W) print ' ╟───────────────────────────────┴────────┬─────────────┴─────────────────────────╢' print ' ║ Beam energy: %8.3f ± %5.3f [MeV] │ Bending field: %6.4f ± %6.4f [T] ║' % (E['p'][0], E['e'][0], E['p'][1], E['e'][1]) print ' ║ Background: %8.0f ± %5.0f │ Amplitude: %8.0f ± %6.0f ║' % (E['p'][5], E['e'][5], E['p'][2], E['e'][2]) print ' ║ Edge tilt pol1: %5.1f ± %5.1f [1/eV] │ Edge tilt pol2: %5.0f ± %5.0f [1/eV^2]║' % tilt print ' ╟────────────────────────────────────────┼───────────────────────────────────────╢' print ' ║ χ²/NDF = %5.1f/%3d │ Probability: %8.6f ║' % (R.Chi2(), R.Ndf(), R.Prob()) print ' ╚════════════════════════════════════════╧═══════════════════════════════════════╝\n' self.Legend.Clear(); self.Legend.AddEntry(self.simple, '#chi^{2}/NDF = %5.1f/%3d (Prob: %5.3f)' % (R.Chi2(), R.Ndf(), R.Prob()), 'l') if (E['p'][2]>self.MinAmp) and (E['e'][2]/E['p'][2]<0.5): k = 4.e+6*E['p'][0]*Constants.wo/Constants.me**2; # [eV*eV / eV**2] return 1.e+3*E['p'][0]*k/(1.+k) # Wmax [keV] print 'Simple fit: bad fit, bad amplitude, spread or χ²'; return 0.0 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- def 
fitEdgeComple(self, W, LBK): K = 2.e+3*W/Constants.me; Wmin = W/(1+K); E1, E2 = W - LBK*Wmin, W + LBK*Wmin C1, C2 = W - 0.9*LBK*Wmin, W + 0.9*LBK*Wmin def FitP(H, P, q): convol = ROOT.TF1Convolution('simple', 'spreso', E1, E2); convol.SetNofPointsFFT(1000) comple = ROOT.TF1('comple', convol, C1, C2, 10); comple.SetNpx(1000) comple.SetParameters(np.fromiter(P, np.float)) if self.Radius: comple.FixParameter(1, self.Bo) if self.EdgeP2: comple.SetParLimits(4, 0.0, 0.01) else: comple.FixParameter(4, 0.0) comple.FixParameter(7, self.RR) comple.FixParameter(8, self.RL) comple.SetParLimits(9,0.1,100.0) R = H.Fit('comple','RSN') if self.Radius: comple.ReleaseParameter(1) if not self.EdgeP2: comple.ReleaseParameter(4) comple.ReleaseParameter(7); comple.ReleaseParameter(8) q.put([R.Status(), R.Chi2(), R.Ndf(), R.Prob(), fitParameters(comple)]) self.convol.SetRange(E1, E2); self.comple.SetRange(C1, C2) # self.simple.SetLineColor(ROOT.kBlue); self.comple.SetLineColor(ROOT.kRed); P = fitParameters(self.simple)['p']; P.extend([self.RR, self.RL, 1.0]) self.comple.SetParameters(np.fromiter(P, np.float)) self.comple.Draw('SAME'); self.cc.Modified(); self.cc.Update() q = Queue(); p = Process(target=FitP, args=(self.hps, P, q)) p.start(); R = q.get() status, chi2, ndf, prob = R[0:4] pV = np.fromiter(R[4]['p'], np.float); self.comple.SetParameters(pV) pE = np.fromiter(R[4]['e'], np.float); self.comple.SetParErrors( pE) p.join() if not status: tilt = (1e+3*pV[3], 1e+3*pE[3], 1e+6*pV[4], 1e+6*pE[4]) k = 4e+6*pV[0]*Constants.wo/Constants.me**2; # [eV*eV / eV**2] deriv = (1.+k)**2/k/(2.+k) # dE/dWmax, apply scale correcion to the beam energy: BE, dBE = pV[0]-1e-3*self.SC*deriv, (pE[0]**2 + (1e-3*deriv*self.dSC)**2)**0.5 # Beam Energy [MeV] BF, dBF = pV[1], pE[1] # Bending Field [T] BS, dBS = pV[9]*deriv, pE[9]*deriv # Beam Spread [MeV] BR = 1.e+8*pV[0]/Constants.c/BF; dBR = BR * ((dBE/BE)**2 + (dBF/BF)**2)**0.5 # Beam Radius [cm] print ' ' print ' ╔ Convolution Fit: 
═════════════╤══════════════════════╤═════════════════════════╗' print ' ║ Range from %5.0f to %5.0f keV │ σR = %4.2f ± %4.2f keV │ σL = %5.2f ± %5.2f keV ║' % (E1, E2, self.RR, self.dRR, self.RL, self.dRL) print ' ╟───────────────────────────────┴────────┬─────────────┴─────────────────────────╢' print ' ║ Beam energy: %8.3f ± %5.3f [MeV] │ Bending field: %6.4f ± %6.4f [T] ║' % (BE, dBE, BF, dBF) print ' ║ σ from beam: %8.3f ± %5.3f [keV] │ Beam spread: %6.0f ± %6.0f [keV] ║' % (pV[9], pE[9], BS, dBS) print ' ║ Edge tilt pol1: %5.1f ± %5.1f [1/eV] │ Edge tilt pol2: %5.0f ± %5.0f [1/eV^2]║' % tilt print ' ╟────────────────────────────────────────┼───────────────────────────────────────╢' print ' ║ χ²/NDF = %5.1f/%3d │ Probability: %8.6f ║' % (chi2, ndf, prob) print ' ╚════════════════════════════════════════╧═══════════════════════════════════════╝\n' self.Legend.AddEntry(self.comple, '#chi^{2}/NDF = %5.1f/%3d (Prob: %5.3f)' % (chi2, ndf, prob), 'l') self.Legend.AddEntry(0, 'E_{beam} = %8.3f #pm %5.3f [MeV]' % (BE, dBE), '') self.Legend.AddEntry(0, '#sigma_{E} = %6.0f #pm %4.0f [keV]' % (BS, dBS), '') self.Legend.AddEntry(0, 'R_{beam} = %6.2f #pm %5.2f [cm]' % (BR, dBR), '') if (pE[0]/pV[0]<0.001) and prob>0.001: return {'BE':[BE,dBE], 'BF':[BF,dBF], 'BS':[BS,dBS], 'BC':[self.VEPP2K['I'], self.VEPP2K['dI']]} else: print 'Status: %d (χ²/NDF = %5.1f/%3d - probability: %8.6f)' % (status, chi2, ndf, prob) print 'Convolution fit: bad fit (status %d), bad amplitude, spread or χ²' % status return False # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- def GetCalibrationResults(self, t, wmax): for attempt in xrange(3): Scale = self.HPGe.Get_Calibration(t, wmax) L = len(Scale['R']) if L: break else: print 'Waiting 15s for calibration results...'; time.sleep(15) if L == 1: c = 0 elif L >1: Q = [Scale['dW'][c] * Scale['dC'][c] * Scale['dR'][c] * Scale['dL'][c] for c in range(L)] # for c in range(L): Q.append(Scale['dW'][c] * 
Scale['dC'][c] * Scale['dR'][c] * Scale['dL'][c]) if len(Q)==0: print 'No valid calibration was found'; return 0, 0 c = Q.index(min(Q)) else: print 'No valid calibration was found'; return 0, 0 zero, gain = Scale['Z'][c], Scale['G' ][c] self.dW = Scale['dW'][c] # linear calibration statistical error, keV self.SC, self.dSC = Scale['C'][c], Scale['dC'][c] # PB-5 scale correction and its error, keV self.RR, self.dRR = Scale['R'][c], Scale['dR'][c] # Right resolution sigma and its error, keV if self.Asymme: self.RL, self.dRL = Scale['L'][c], Scale['dL'][c] # Left resolution sigma and its error, keV else: self.RL, self.dRL = Scale['R'][c], Scale['dR'][c] # Left resolution sigma and its error, keV self.EP = Scale['X'][c] # exclude peaks, keV print ' ╔ HPGe calibration: %15s ══════════════════╤══════════════════════════╗' % (self.HPGe.outfile.split('/')[-1]) print ' ║ W_max = %9.3f keV │ σR = %6.3f ± %5.3f keV │ σL = %6.3f ± %5.3f keV ║' % (wmax, self.RR, self.dRR, self.RL, self.dRL) print ' ╚══════════════════════════╧══════════════════════════╧══════════════════════════╝\n' return zero, gain # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- class VEPP2K_DB: # cc = ROOT.TCanvas('cc','LOAD [Hz] vs I[mA]', 5, 5, 1000, 1200) # NAMEs = { 't':'t', 'Ee':'energy', 'Ie':'e_current', 'Bo':'nmr_field', 'L1':'load1', 'L2':'load2', 'L3':'load3'} NAMEs = { 't':'t', 'Ee':'energy', 'Ie':'e_current', 'Bo':'nmr_field'} def GetRunInfo(self,filechain): R, T = {'E':0.0, 'dE':0.0, 'I':0.0, 'dI':0.0, 'B':0.0, 'dB':0.0}, { k:[] for k in self.NAMEs.keys()} for el in filechain: fn = el.split('.')[0].replace('SPECTRA','LOADS') + '.sta' try: with open(fn,'rb') as fp: data = cPickle.load(fp) except: print 'sta read error'; return R OK, LREC = (len(data.keys())==10), len(data['t']) if not OK: print 'Bad file: %s' % fn; return False else: for k in data.keys(): L = 
len(data[k]) if L+2 < LREC: print 'Warning: LREC_t = %d, while LREC_%s = %d' % (LREC,k,L) data[k].extend([data[k][-1]]*(LREC-L)) elif L > LREC: print 'Error: LREC_t = %d, while LREC_%s = %d' % (LREC,k,L) return R for k,v in self.NAMEs.iteritems(): T[k].extend(data[v]) E = np.fromiter(T['Ee'], np.float); R['E'], R['dE'] = E.mean(), E.std() B = np.fromiter(T['Bo'], np.float); R['B'], R['dB'] = B.mean(), B.std() # I = np.fromiter(T['Ie'], np.float); R['I'], R['dI'] = I.mean(), I.std() I = [i for i in T['Ie'] if i > 2.0] if len(I): I = np.fromiter(I, np.float); R['I'], R['dI'] = I.mean(), I.std() else: R['I'], R['dI'] = 0.0, 0.0 # n = len(I); dI = np.zeros(n) # L = np.fromiter(T['L1'], np.float) + np.fromiter(T['L2'], np.float) + np.fromiter(T['L3'], np.float) # L = np.fromiter(T['L2'], np.float) # dL = L**0.5 # self.cc.cd() # self.cc.SetGrid() # LvsI = ROOT.TGraphErrors(n,I,L,dI,dL) # LvsI.Draw('AP') # self.cc.Modified(); self.cc.Update() # raw_input() # LvsI.Delete() # print ' ╔ VEPP2K conditions: ══════╤═════════════════════════╤═══════════════════════════╗' # print ' ║ E = %7.2f ± %5.2f MeV │ Ie = %5.1f ± %5.1f mA │ Bo = %7.5f ± %7.5f T ║' % (R['E'], R['dE'], R['I'], R['dI'], R['B'], R['dB']) # print ' ╚══════════════════════════╧═════════════════════════╧═══════════════════════════╝\n' # if R['I'] < 5.0 : return False return R # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- class EdgeSimple(Constants): # Ai integral (classical formula) and Klein-Nishina cross section ROOT.gROOT.LoadMacro(sys.argv[0].replace(sys.argv[0].split('/')[-1], 'vepp2k/airy_ai_int.C')) ROOT.airy_ai_int(np.array([0.0]), np.array([0.0])) # first call => initialization (could be up to 5 sec) iAiry = ROOT.TF1("af", ROOT.airy_ai_int, -55, 5, 1) # p[0] - Beam Energy [MeV] | p[1] - Bending field [T] | p[2] - Amplitude # p[3] - Linear Slope at Wmax | p[4] - Square Slope at Wmax | p[5] and p[6] - Background and its Slope def __call__(self, x, p): u = 
x[0]/(1.e+3*p[0]-x[0]) # u [keV / keV] k = p[0] / Constants.Eo # kappa [MeV / MeV] X = p[0]*p[1] * Constants.RemB # Xi, free field if comment next line D = x[0] - p[0]*k/(1.+k)*1.e+3 # (W - Wmax) [keV] I = self.iAiry(ROOT.TMath.Power((u/X),0.6666666666666666) * (1.-k/u)) return I * p[2]*(1.0 + p[3]*D + p[4]*D**2) + p[5] + p[6]*D # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- class HPGeSpread: # Analitical convolution of the HPGe bifurcated Gaussian with normal Gaussian from beam spread No = 1./ROOT.TMath.Sqrt(ROOT.TMath.Pi()) # p[0] - HPGe sigma_R [keV] | p[1] - HPGe sigma_L [keV] | p[2] Beam spread smearing [keV] def __call__(self, x, p): SR = 0.5/(p[0]*p[0] + p[2]*p[2]); RR = ROOT.TMath.Sqrt(SR) SL = 0.5/(p[1]*p[1] + p[2]*p[2]); RL = ROOT.TMath.Sqrt(SL) R = ROOT.TMath.Exp(-x[0]*x[0]*SR) * ROOT.TMath.Erfc(-x[0] * RR * p[0]/p[2]) * RR / (1. + p[1]/p[0]) L = ROOT.TMath.Exp(-x[0]*x[0]*SL) * ROOT.TMath.Erfc( x[0] * RL * p[1]/p[2]) * RL / (1. + p[0]/p[1]) return self.No * (L + R) """ # ЭТО НЕВЕСТЬ КАК ПОЛУЧЕННАЯ НЕПРАВИЛЬНАЯ СВЕРТКА!!! (СОХРАНЕНО ДЛЯ ИСТОРИИ) class HPGeSpread: # Analitical convolution of the HPGe bifurcated Gaussian with normal Gaussian from beam spread # p[0] - HPGe sigma_R [keV] | p[1] - HPGe sigma_L [keV] | p[2] Beam spread smearing [keV] C = 1./ROOT.TMath.Sqrt(2*ROOT.TMath.Pi()) def __call__(self, x, p): R, L = p[0]*p[0] + p[1]*p[1], p[0]*p[0] + p[2]*p[2] R = p[0] * ROOT.TMath.Exp(-0.5*x[0]**2/R) * ROOT.TMath.Erfc(-x[0]*p[0]/(p[2]*ROOT.TMath.Sqrt(R))) L = p[1] * ROOT.TMath.Exp(-0.5*x[0]**2/L) * ROOT.TMath.Erfc( x[0]*p[1]/(p[2]*ROOT.TMath.Sqrt(L))) return self.C*(R+L)/(p[0]+p[1]) # ЭТО НЕВЕСТЬ ОТКУДА ПОЛУЧЕННАЯ НЕПРАВИЛЬНАЯ СВЕРТКА!!! 
""" # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- class EMSResults: # Beam Energy, Bending Field, Beam energy Spread, Beam Current BE, BF, BS, BC = ROOT.TGraphErrors(), ROOT.TGraphErrors(), ROOT.TGraphErrors(), ROOT.TGraphErrors() SI = ROOT.TProfile('SI','spread vs current', 30, 0, 150, 'g') headline = ('t, s', 'dt, s', ' E, MeV', 'dE, MeV', 'S, keV', 'dS, keV', ' B, T', 'dB, T', 'I, mA', 'dI, mA') headline = '# %8s %5s %8s %5s %5s %5s %6s %6s %5s %6s\n' % headline dataline = '%10d %5d %8.3f %7.3f %6d %7d %6.4f %6.4f %5.1f %6.1f\n' def __init__(self, cfg_file, folder): cfg = ConfigParser.ConfigParser() cfg.read(cfg_file) self.roofile = folder + cfg.get('scan', 'file') self.datfile = folder + cfg.get('scan', 'file') +'.txt' self.espread = folder + 'SvsI.pdf' self.SI.SetTitle(folder) with open(self.datfile,'a') as f: # f.write('# R=140cm, assym = false, edgepoly2 = false\n') f.write(self.headline) def __del__(self): if hasattr(self, 'rc'): self.rc.cd(); self.rc.Clear() if hasattr(self, 'ic'): self.ic.cd(); self.ic.Clear() def SaveGraphs(self): fp = ROOT.TFile(self.roofile, 'RECREATE') if fp.IsOpen(): fp.WriteObject(self.BE, 'BE') fp.WriteObject(self.BF, 'BF') fp.WriteObject(self.BS, 'BS') fp.WriteObject(self.BC, 'BC') fp.Close() def ReadGraphs(self): if os.path.isfile(self.roofile): fp = ROOT.TFile(self.roofile, 'READ') fp.GetObject('BE', self.BE); n = self.BE.GetN() fp.GetObject('BF', self.BF); m = self.BF.GetN() fp.GetObject('BS', self.BS); k = self.BS.GetN() fp.GetObject('BC', self.BC); l = self.BC.GetN() fp.Close() return n, m, k, l else: return 0, 0, 0, 0 def AddPoint(self, UTB, UTE, R): n, m, k, l = self.ReadGraphs() t, dt = 0.5*(UTB + UTE), 0.5*(UTE - UTB) e, de = R['BE']; self.BE.SetPoint(n, t, e); self.BE.SetPointError(n, dt, de) 
b, db = R['BF']; self.BF.SetPoint(m, t, b); self.BF.SetPointError(m, dt, db) s, ds = R['BS']; self.BS.SetPoint(k, t, s); self.BS.SetPointError(k, dt, ds) c, dc = R['BC']; self.BC.SetPoint(l, t, c); self.BC.SetPointError(l, dt, dc) self.SaveGraphs() with open(self.datfile,'a') as f: f.write(self.dataline % (t,dt, e,de, s,ds, b,db, c,dc)) def ShowRunInfo(self): n, m, k, l = self.ReadGraphs() for g in [self.BE, self.BF, self.BS, self.BC]: g.GetXaxis().SetTimeDisplay(1); g.GetXaxis().SetTimeFormat('#splitline{%b%d}{%H:%M}%F1970-01-01 00:00:00') g.GetXaxis().SetTitle('time'); g.GetXaxis().SetLabelOffset(0.02) g.SetMarkerColor(ROOT.kRed); g.SetLineColor(ROOT.kRed) g.SetMarkerStyle(20); g.SetMarkerSize(1.25); g.GetYaxis().SetDecimals() self.rc = ROOT.TCanvas('rc','BEMS results for VEPP-2000', 0, 0, 1600, 1200) self.rc.Divide(1,3) self.rc.cd(1); self.rc.GetPad(1).SetGrid(); self.BE.Fit('pol0'); self.BE.Draw('AP'); self.BE.GetXaxis().SetTitle('time'); self.BE.GetYaxis().SetTitle('Beam energy [MeV]') self.rc.cd(2); self.rc.GetPad(2).SetGrid(); self.BS.Fit('pol0'); self.BS.Draw('AP'); self.BS.GetXaxis().SetTitle('time'); self.BS.GetYaxis().SetTitle('Beam energy spread [keV]') self.rc.cd(3); self.rc.GetPad(3).SetGrid(); self.BC.Fit('pol0'); self.BC.Draw('AP'); self.BC.GetXaxis().SetTitle('time'); self.BC.GetYaxis().SetTitle('Beam current [mA]') # self.rc.cd(4); self.rc.GetPad(4).SetGrid(); self.BF.Fit('pol0'); self.BF.Draw('AP'); # self.BF.GetXaxis().SetTitle('time'); self.BF.GetYaxis().SetTitle('Bending field [T]') self.rc.Modified(); self.rc.Update(); self.rc.SaveAs(self.roofile + '.pdf') def EnergySpread(self): n, m, k, l = self.ReadGraphs() t, e, s, i = ROOT.Double(), ROOT.Double(), ROOT.Double(), ROOT.Double() xmin, xmax = 100.0, 0.0 ymin, ymax = 10.0, 0.0 for p in range(n): self.BE.GetPoint(p,t,e); de = self.BE.GetErrorY(p) self.BS.GetPoint(p,t,s); ds = self.BS.GetErrorY(p) self.BC.GetPoint(p,t,i); di = self.BC.GetErrorY(p) if (i>0.0) and (s>0.0) and abs(di/i)<0.5 and 
abs(ds/s)<0.5: S, dS = 10.*s/e, 10.*ds/e self.SI.Fill(i,S,1./dS**2) xmin = min(xmin, i-di); xmax = max(xmax, i+di) ymin = min(ymin, S-dS); ymax = max(ymax, S+dS) self.SI.GetXaxis().SetTitle('Current [mA]'); #self.SI.GetXaxis().SetLabelOffset(0.03) self.SI.GetXaxis().SetRangeUser(xmin, xmax) self.SI.GetYaxis().SetTitle('#sigma_{E}/E #upoint 10^{-4}'); #self.SI.GetYaxis().SetLabelOffset(0.02) self.SI.GetYaxis().SetRangeUser(ymin, ymax) self.SI.SetMarkerColor(ROOT.kRed); self.SI.SetLineColor(ROOT.kRed) self.SI.SetMarkerStyle(20); self.SI.SetMarkerSize(1.25); self.SI.GetYaxis().SetDecimals() self.sc = ROOT.TCanvas('sc','BEMS results for VEPP-2000', 0, 0, 1200, 800) self.sc.cd(), self.sc.SetGrid() self.SI.Draw() self.sc.Modified(); self.sc.Update(); self.sc.SaveAs(self.espread) class Points_Splitter: ENERGY_CHANGE = 5.0 SMALL_CURRENT = 5.0 E_MIN, E_MAX = 100., 1000. success = [] failure = [] def Go(self, flist, todo): from hpge import DataFile T = DataFile() T.ReadHat(flist[0], 100) BDATE = str(int(T.HAT['Date'])) R = VEPP2K_DB().GetRunInfo([flist[0]]) if R: E0 = R['E'] else: E0 = 0.0 while len(flist): f = flist.pop(0) T.ReadHat(f, 100) t, dt = 0.5*(T.utb + T.ute), 0.5*(T.ute - T.utb) R = VEPP2K_DB().GetRunInfo([f]) if T and R: E, dE, I, dI = R['E'], R['dE'], R['I'], R['dI'] SUCCESS = self.E_MIN < E < self.E_MAX SUCCESS = dt > 100. 
and dE < self.ENERGY_CHANGE SUCCESS = SUCCESS and abs(E-E0) < self.ENERGY_CHANGE SUCCESS = SUCCESS and I > self.SMALL_CURRENT SUCCESS = SUCCESS and len(flist) FAILURE = E < self.E_MIN or I < self.SMALL_CURRENT FAILURE = FAILURE or dE > self.ENERGY_CHANGE FAILURE = FAILURE and len(flist) PFINISH = self.E_MIN < E < self.E_MAX PFINISH = PFINISH and abs(E-E0) > self.ENERGY_CHANGE PFINISH = PFINISH or len(flist)==0 if SUCCESS: self.success.append(f) elif FAILURE: self.failure.append(f) elif PFINISH: EDATE = str(int(T.HAT['Date'])) folder = '%8s-%8s~%3.0fMeV' % (BDATE, EDATE, E0) print folder print 'number of good files: ', len(self.success) print 'number of bad files: ', len(self.failure) if not os.path.exists(folder): os.makedirs(folder) with open('%s/success.list' % (folder), 'w') as fp: for el in self.success: fp.write(el+'\n') with open('%s/failure.list' % (folder), 'w') as fp: for el in self.failure: fp.write(el+'\n') E0 = R['E'] BDATE = EDATE flist.insert(0,f) del self.success[:] del self.failure[:] # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- def Save_for_SND(UTB, UTE, R): t, dt = UTB, UTE - UTB; E, dE = R['BE']; B, dB = R['BF']; S, dS = R['BS'] PL = {'/EMS/DT':dt, '/EMS/E':E, '/EMS/DE':dE, '/EMS/B':B, '/EMS/DB':dB, '/EMS/S':S, '/EMS/DS':dS} print 'WRITING TO SND ', DB = Storage() for k,v in PL.iteritems(): DB.register(k); DB[k]=v; print '.', try: DB.insert(t) except: try: DB.replace(t) except: print 'Warning! Can not write nor replace data!' return print ' DONE' return # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- def fitParameters(fitf): n, p, e = fitf.GetNumberFreeParameters(), [], [] for i in range(n): p.append(fitf.GetParameter(i)) e.append(fitf.GetParError(i)) return {'p':p,'e':e}
muchnoi/HPGe
vepp2k/vepp2k.py
Python
gpl-3.0
31,813
[ "Gaussian" ]
e01d51ec07341c62daa5802565bf91da84f80fbf51486cc59079cb641cae0f2a
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RAffyqcreport(RPackage): """This package creates a QC report for an AffyBatch object. 
The report is intended to allow the user to quickly assess the quality of a set of arrays in an AffyBatch object.""" homepage = "https://www.bioconductor.org/packages/affyQCReport/" url = "https://git.bioconductor.org/packages/affyQCReport" version('1.54.0', git='https://git.bioconductor.org/packages/affyQCReport', commit='5572e9981dc874b78b4adebf58080cac3fbb69e1') depends_on('r@3.4.0:3.4.9', when='@1.54.0') depends_on('r-biobase', type=('build', 'run')) depends_on('r-affy', type=('build', 'run')) depends_on('r-lattice', type=('build', 'run')) depends_on('r-affyplm', type=('build', 'run')) depends_on('r-genefilter', type=('build', 'run')) depends_on('r-rcolorbrewer', type=('build', 'run')) depends_on('r-simpleaffy', type=('build', 'run')) depends_on('r-xtable', type=('build', 'run'))
skosukhin/spack
var/spack/repos/builtin/packages/r-affyqcreport/package.py
Python
lgpl-2.1
2,212
[ "Bioconductor" ]
69d623acab310fa714d97a65a687be31f832242e333d63750e153a8f18f29e7a
# # Copyright (c) 2009-2015, Jack Poulson # All rights reserved. # # This file is part of Elemental and is under the BSD 2-Clause License, # which can be found in the LICENSE file in the root directory, or at # http://opensource.org/licenses/BSD-2-Clause # from El.core import * from ctypes import CFUNCTYPE # Special matrices # **************** # Deterministic # ============= # Bull's head # ----------- lib.ElBullsHead_c.argtypes = \ lib.ElBullsHead_z.argtypes = \ lib.ElBullsHeadDist_c.argtypes = \ lib.ElBullsHeadDist_z.argtypes = \ [c_void_p,iType] def BullsHead(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == cTag: lib.ElBullsHead_c(*args) elif A.tag == zTag: lib.ElBullsHead_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElBullsHeadDist_c(*args) elif A.tag == zTag: lib.ElBullsHeadDist_z(*args) else: DataExcept() else: TypeExcept() # Cauchy # ------ lib.ElCauchy_s.argtypes = \ lib.ElCauchyDist_s.argtypes = \ [c_void_p,iType,POINTER(sType),iType,POINTER(sType)] lib.ElCauchy_d.argtypes = \ lib.ElCauchyDist_d.argtypes = \ [c_void_p,iType,POINTER(dType),iType,POINTER(dType)] lib.ElCauchy_c.argtypes = \ lib.ElCauchyDist_c.argtypes = \ [c_void_p,iType,POINTER(cType),iType,POINTER(cType)] lib.ElCauchy_z.argtypes = \ lib.ElCauchyDist_z.argtypes = \ [c_void_p,iType,POINTER(zType),iType,POINTER(zType)] def Cauchy(A,x,y): xLen = len(x) yLen = len(y) xBuf = (TagToType(A.tag)*xLen)(*x) yBuf = (TagToType(A.tag)*yLen)(*y) args = [A.obj,xLen,xBuf.yLen,yBuf] if type(A) is Matrix: if A.tag == sTag: lib.ElCauchy_s(*args) elif A.tag == dTag: lib.ElCauchy_d(*args) elif A.tag == cTag: lib.ElCauchy_c(*args) elif A.tag == zTag: lib.ElCauchy_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElCauchyDist_s(*args) elif A.tag == dTag: lib.ElCauchyDist_d(*args) elif A.tag == cTag: lib.ElCauchyDist_c(*args) elif A.tag == zTag: lib.ElCauchyDist_z(*args) else: DataExcept() else: TypeExcept() # Cauchy-like # ----------- 
lib.ElCauchyLike_s.argtypes = \ lib.ElCauchyLikeDist_s.argtypes = \ [c_void_p,iType,POINTER(sType),iType,POINTER(sType), iType,POINTER(sType),iType,POINTER(sType)] lib.ElCauchyLike_d.argtypes = \ lib.ElCauchyLikeDist_d.argtypes = \ [c_void_p,iType,POINTER(dType),iType,POINTER(dType), iType,POINTER(dType),iType,POINTER(dType)] lib.ElCauchyLike_c.argtypes = \ lib.ElCauchyLikeDist_c.argtypes = \ [c_void_p,iType,POINTER(cType),iType,POINTER(cType), iType,POINTER(cType),iType,POINTER(cType)] lib.ElCauchyLike_z.argtypes = \ lib.ElCauchyLikeDist_z.argtypes = \ [c_void_p,iType,POINTER(zType),iType,POINTER(zType), iType,POINTER(zType),iType,POINTER(zType)] def CauchyLike(A,r,s,x,y): rLen = len(r) sLen = len(s) xLen = len(x) yLen = len(y) rBuf = (TagToType(A.tag)*rLen)(*r) sBuf = (TagToType(A.tag)*sLen)(*s) xBuf = (TagToType(A.tag)*xLen)(*x) yBuf = (TagToType(A.tag)*yLen)(*y) args = [A.obj,rLen,rBuf,sLen,sBuf,xLen,xBuf,yLen,yBuf] if type(A) is Matrix: if A.tag == sTag: lib.ElCauchyLike_s(*args) elif A.tag == dTag: lib.ElCauchyLike_d(*args) elif A.tag == cTag: lib.ElCauchyLike_c(*args) elif A.tag == zTag: lib.ElCauchyLike_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElCauchyLikeDist_s(*args) elif A.tag == dTag: lib.ElCauchyLikeDist_d(*args) elif A.tag == cTag: lib.ElCauchyLikeDist_c(*args) elif A.tag == zTag: lib.ElCauchyLikeDist_z(*args) else: DataExcept() else: TypeExcept() # Circulant # --------- lib.ElCirculant_i.argtypes = \ lib.ElCirculantDist_i.argtypes = \ [c_void_p,iType,POINTER(iType)] lib.ElCirculant_s.argtypes = \ lib.ElCirculantDist_s.argtypes = \ [c_void_p,iType,POINTER(sType)] lib.ElCirculant_d.argtypes = \ lib.ElCirculantDist_d.argtypes = \ [c_void_p,iType,POINTER(dType)] lib.ElCirculant_c.argtypes = \ lib.ElCirculantDist_c.argtypes = \ [c_void_p,iType,POINTER(cType)] lib.ElCirculant_z.argtypes = \ lib.ElCirculantDist_z.argtypes = \ [c_void_p,iType,POINTER(zType)] def Circulant(A,a): aLen = len(a) aBuf = 
(TagToType(A.tag)*aLen)(*a) args = [A.obj,aLen,aBuf] if type(A) is Matrix: if A.tag == iTag: lib.ElCirculant_i(*args) elif A.tag == sTag: lib.ElCirculant_s(*args) elif A.tag == dTag: lib.ElCirculant_d(*args) elif A.tag == cTag: lib.ElCirculant_c(*args) elif A.tag == zTag: lib.ElCirculant_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElCirculantDist_i(*args) elif A.tag == sTag: lib.ElCirculantDist_s(*args) elif A.tag == dTag: lib.ElCirculantDist_d(*args) elif A.tag == cTag: lib.ElCirculantDist_c(*args) elif A.tag == zTag: lib.ElCirculantDist_z(*args) else: DataExcept() else: TypeExcept() # Demmel # ------ lib.ElDemmel_s.argtypes = \ lib.ElDemmel_d.argtypes = \ lib.ElDemmel_c.argtypes = \ lib.ElDemmel_z.argtypes = \ lib.ElDemmelDist_s.argtypes = \ lib.ElDemmelDist_d.argtypes = \ lib.ElDemmelDist_c.argtypes = \ lib.ElDemmelDist_z.argtypes = \ [c_void_p,iType] def Demmel(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElDemmel_s(*args) elif A.tag == dTag: lib.ElDemmel_d(*args) elif A.tag == cTag: lib.ElDemmel_c(*args) elif A.tag == zTag: lib.ElDemmel_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElDemmelDist_s(*args) elif A.tag == dTag: lib.ElDemmelDist_d(*args) elif A.tag == cTag: lib.ElDemmelDist_c(*args) elif A.tag == zTag: lib.ElDemmelDist_z(*args) else: DataExcept() else: TypeExcept() # Diagonal # -------- lib.ElDiagonal_i.argtypes = \ lib.ElDiagonalDist_i.argtypes = \ [c_void_p,iType,POINTER(iType)] lib.ElDiagonal_s.argtypes = \ lib.ElDiagonalDist_s.argtypes = \ [c_void_p,iType,POINTER(sType)] lib.ElDiagonal_d.argtypes = \ lib.ElDiagonalDist_d.argtypes = \ [c_void_p,iType,POINTER(dType)] lib.ElDiagonal_c.argtypes = \ lib.ElDiagonalDist_c.argtypes = \ [c_void_p,iType,POINTER(cType)] lib.ElDiagonal_z.argtypes = \ lib.ElDiagonalDist_z.argtypes = \ [c_void_p,iType,POINTER(zType)] def Diagonal(A,d): dLen = len(d) dBuf = (TagToType(A.tag)*dLen)(*d) args = [A.obj,dLen,dBuf] if type(A) is 
Matrix: if A.tag == iTag: lib.ElDiagonal_i(*args) elif A.tag == sTag: lib.ElDiagonal_s(*args) elif A.tag == dTag: lib.ElDiagonal_d(*args) elif A.tag == cTag: lib.ElDiagonal_c(*args) elif A.tag == zTag: lib.ElDiagonal_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElDiagonalDist_i(*args) elif A.tag == sTag: lib.ElDiagonalDist_s(*args) elif A.tag == dTag: lib.ElDiagonalDist_d(*args) elif A.tag == cTag: lib.ElDiagonalDist_c(*args) elif A.tag == zTag: lib.ElDiagonalDist_z(*args) else: DataExcept() else: TypeExcept() # DruinskyToledo # -------------- lib.ElDruinskyToledo_s.argtypes = \ lib.ElDruinskyToledo_d.argtypes = \ lib.ElDruinskyToledo_c.argtypes = \ lib.ElDruinskyToledo_z.argtypes = \ lib.ElDruinskyToledoDist_s.argtypes = \ lib.ElDruinskyToledoDist_d.argtypes = \ lib.ElDruinskyToledoDist_c.argtypes = \ lib.ElDruinskyToledoDist_z.argtypes = \ [c_void_p,iType] def DruinskyToledo(A,k): args = [A.obj,k] if type(A) is Matrix: if A.tag == sTag: lib.ElDruinskyToledo_s(*args) elif A.tag == dTag: lib.ElDruinskyToledo_d(*args) elif A.tag == cTag: lib.ElDruinskyToledo_c(*args) elif A.tag == zTag: lib.ElDruinskyToledo_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElDruinskyToledoDist_s(*args) elif A.tag == dTag: lib.ElDruinskyToledoDist_d(*args) elif A.tag == cTag: lib.ElDruinskyToledoDist_c(*args) elif A.tag == zTag: lib.ElDruinskyToledoDist_z(*args) else: DataExcept() else: TypeExcept() # Dynamic regularization counter-example # -------------------------------------- lib.ElDynamicRegCounter_s.argtypes = \ lib.ElDynamicRegCounter_d.argtypes = \ lib.ElDynamicRegCounter_c.argtypes = \ lib.ElDynamicRegCounter_z.argtypes = \ lib.ElDynamicRegCounterDist_s.argtypes = \ lib.ElDynamicRegCounterDist_d.argtypes = \ lib.ElDynamicRegCounterDist_c.argtypes = \ lib.ElDynamicRegCounterDist_z.argtypes = \ lib.ElDynamicRegCounterSparse_s.argtypes = \ lib.ElDynamicRegCounterSparse_d.argtypes = \ 
lib.ElDynamicRegCounterSparse_c.argtypes = \ lib.ElDynamicRegCounterSparse_z.argtypes = \ lib.ElDynamicRegCounterDistSparse_s.argtypes = \ lib.ElDynamicRegCounterDistSparse_d.argtypes = \ lib.ElDynamicRegCounterDistSparse_c.argtypes = \ lib.ElDynamicRegCounterDistSparse_z.argtypes = \ [c_void_p,iType] def DynamicRegCounter(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElDynamicRegCounter_s(*args) elif A.tag == dTag: lib.ElDynamicRegCounter_d(*args) elif A.tag == cTag: lib.ElDynamicRegCounter_c(*args) elif A.tag == zTag: lib.ElDynamicRegCounter_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElDynamicRegCounterDist_s(*args) elif A.tag == dTag: lib.ElDynamicRegCounterDist_d(*args) elif A.tag == cTag: lib.ElDynamicRegCounterDist_c(*args) elif A.tag == zTag: lib.ElDynamicRegCounterDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == sTag: lib.ElDynamicRegCounterSparse_s(*args) elif A.tag == dTag: lib.ElDynamicRegCounterSparse_d(*args) elif A.tag == cTag: lib.ElDynamicRegCounterSparse_c(*args) elif A.tag == zTag: lib.ElDynamicRegCounterSparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == sTag: lib.ElDynamicRegCounterDistSparse_s(*args) elif A.tag == dTag: lib.ElDynamicRegCounterDistSparse_d(*args) elif A.tag == cTag: lib.ElDynamicRegCounterDistSparse_c(*args) elif A.tag == zTag: lib.ElDynamicRegCounterDistSparse_z(*args) else: DataExcept() else: TypeExcept() # Egorov # ------ lib.ElEgorov_c.argtypes = \ lib.ElEgorovDist_c.argtypes = \ [c_void_p,CFUNCTYPE(sType,iType,iType),iType] lib.ElEgorov_z.argtypes = \ lib.ElEgorovDist_z.argtypes = \ [c_void_p,CFUNCTYPE(dType,iType,iType),iType] def Egorov(A,phase,n): cPhase = CFUNCTYPE(TagToType(Base(A.tag)),iType,iType)(phase) args = [A.obj,cPhase,n] if type(A) is Matrix: if A.tag == cTag: lib.ElEgorov_c(*args) elif A.tag == zTag: lib.ElEgorov_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: 
lib.ElEgorovDist_c(*args) elif A.tag == zTag: lib.ElEgorovDist_z(*args) else: DataExcept() else: TypeExcept() # Ehrenfest # --------- lib.ElEhrenfest_s.argtypes = \ lib.ElEhrenfest_d.argtypes = \ lib.ElEhrenfest_c.argtypes = \ lib.ElEhrenfest_z.argtypes = \ lib.ElEhrenfestDist_s.argtypes = \ lib.ElEhrenfestDist_d.argtypes = \ lib.ElEhrenfestDist_c.argtypes = \ lib.ElEhrenfestDist_z.argtypes = \ [c_void_p,iType] def Ehrenfest(P,n): args = [P.obj,n] if type(P) is Matrix: if P.tag == sTag: lib.ElEhrenfest_s(*args) elif P.tag == dTag: lib.ElEhrenfest_d(*args) elif P.tag == cTag: lib.ElEhrenfest_c(*args) elif P.tag == zTag: lib.ElEhrenfest_z(*args) else: DataExcept() elif type(P) is DistMatrix: if P.tag == sTag: lib.ElEhrenfestDist_s(*args) elif P.tag == dTag: lib.ElEhrenfestDist_d(*args) elif P.tag == cTag: lib.ElEhrenfestDist_c(*args) elif P.tag == zTag: lib.ElEhrenfestDist_z(*args) else: DataExcept() else: TypeExcept() lib.ElEhrenfestStationary_s.argtypes = \ lib.ElEhrenfestStationary_d.argtypes = \ lib.ElEhrenfestStationary_c.argtypes = \ lib.ElEhrenfestStationary_z.argtypes = \ lib.ElEhrenfestStationaryDist_s.argtypes = \ lib.ElEhrenfestStationaryDist_d.argtypes = \ lib.ElEhrenfestStationaryDist_c.argtypes = \ lib.ElEhrenfestStationaryDist_z.argtypes = \ [c_void_p,iType] def EhrenfestStationary(PInf,n): args = [PInf.obj,n] if type(PInf) is Matrix: if PInf.tag == sTag: lib.ElEhrenfestStationary_s(*args) elif PInf.tag == dTag: lib.ElEhrenfestStationary_d(*args) elif PInf.tag == cTag: lib.ElEhrenfestStationary_c(*args) elif PInf.tag == zTag: lib.ElEhrenfestStationary_z(*args) else: DataExcept() elif type(PInf) is DistMatrix: if PInf.tag == sTag: lib.ElEhrenfestStationaryDist_s(*args) elif PInf.tag == dTag: lib.ElEhrenfestStationaryDist_d(*args) elif PInf.tag == cTag: lib.ElEhrenfestStationaryDist_c(*args) elif PInf.tag == zTag: lib.ElEhrenfestStationaryDist_z(*args) else: DataExcept() else: TypeExcept() lib.ElEhrenfestDecay_s.argtypes = \ 
lib.ElEhrenfestDecay_d.argtypes = \ lib.ElEhrenfestDecay_c.argtypes = \ lib.ElEhrenfestDecay_z.argtypes = \ lib.ElEhrenfestDecayDist_s.argtypes = \ lib.ElEhrenfestDecayDist_d.argtypes = \ lib.ElEhrenfestDecayDist_c.argtypes = \ lib.ElEhrenfestDecayDist_z.argtypes = \ [c_void_p,iType] def EhrenfestDecay(PInf,n): args = [PInf.obj,n] if type(PInf) is Matrix: if PInf.tag == sTag: lib.ElEhrenfestDecay_s(*args) elif PInf.tag == dTag: lib.ElEhrenfestDecay_d(*args) elif PInf.tag == cTag: lib.ElEhrenfestDecay_c(*args) elif PInf.tag == zTag: lib.ElEhrenfestDecay_z(*args) else: DataExcept() elif type(PInf) is DistMatrix: if PInf.tag == sTag: lib.ElEhrenfestDecayDist_s(*args) elif PInf.tag == dTag: lib.ElEhrenfestDecayDist_d(*args) elif PInf.tag == cTag: lib.ElEhrenfestDecayDist_c(*args) elif PInf.tag == zTag: lib.ElEhrenfestDecayDist_z(*args) else: DataExcept() else: TypeExcept() # Extended Kahan # -------------- lib.ElExtendedKahan_s.argtypes = \ lib.ElExtendedKahan_c.argtypes = \ lib.ElExtendedKahanDist_s.argtypes = \ lib.ElExtendedKahanDist_c.argtypes = \ [c_void_p,iType,sType,sType] lib.ElExtendedKahan_d.argtypes = \ lib.ElExtendedKahan_z.argtypes = \ lib.ElExtendedKahanDist_d.argtypes = \ lib.ElExtendedKahanDist_z.argtypes = \ [c_void_p,iType,dType,dType] def ExtendedKahan(A,k,phi,mu): args = [A.obj,k,phi,mu] if type(A) is Matrix: if A.tag == sTag: lib.ElExtendedKahan_s(*args) elif A.tag == dTag: lib.ElExtendedKahan_d(*args) elif A.tag == cTag: lib.ElExtendedKahan_c(*args) elif A.tag == zTag: lib.ElExtendedKahan_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElExtendedKahanDist_s(*args) elif A.tag == dTag: lib.ElExtendedKahanDist_d(*args) elif A.tag == cTag: lib.ElExtendedKahanDist_c(*args) elif A.tag == zTag: lib.ElExtendedKahanDist_z(*args) else: DataExcept() else: TypeExcept() # Fiedler # ------- lib.ElFiedler_s.argtypes = \ lib.ElFiedlerDist_s.argtypes = \ [c_void_p,iType,POINTER(sType)] lib.ElFiedler_d.argtypes = \ 
lib.ElFiedlerDist_d.argtypes = \ [c_void_p,iType,POINTER(dType)] lib.ElFiedler_c.argtypes = \ lib.ElFiedlerDist_c.argtypes = \ [c_void_p,iType,POINTER(cType)] lib.ElFiedler_z.argtypes = \ lib.ElFiedlerDist_z.argtypes = \ [c_void_p,iType,POINTER(zType)] def Fiedler(A,c): cLen = len(c) cBuf = (TagToType(A.tag)*cLen)(*c) args = [A.obj,cLen,cBuf] if type(A) is Matrix: if A.tag == sTag: lib.ElFiedler_s(*args) elif A.tag == dTag: lib.ElFiedler_d(*args) elif A.tag == cTag: lib.ElFiedler_c(*args) elif A.tag == zTag: lib.ElFiedler_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElFiedlerDist_s(*args) elif A.tag == dTag: lib.ElFiedlerDist_d(*args) elif A.tag == cTag: lib.ElFiedlerDist_c(*args) elif A.tag == zTag: lib.ElFiedlerDist_z(*args) else: DataExcept() else: TypeExcept() # Forsythe # -------- lib.ElForsythe_i.argtypes = \ lib.ElForsytheDist_i.argtypes = \ [c_void_p,iType,iType,iType] lib.ElForsythe_s.argtypes = \ lib.ElForsytheDist_s.argtypes = \ [c_void_p,iType,sType,sType] lib.ElForsythe_d.argtypes = \ lib.ElForsytheDist_d.argtypes = \ [c_void_p,iType,dType,dType] lib.ElForsythe_c.argtypes = \ lib.ElForsytheDist_c.argtypes = \ [c_void_p,iType,cType,cType] lib.ElForsythe_z.argtypes = \ lib.ElForsytheDist_z.argtypes = \ [c_void_p,iType,zType,zType] def Forsythe(J,n,alpha,lamb): args = [A.obj,n,alpha,lamb] if type(J) is Matrix: if J.tag == iTag: lib.ElForsythe_i(*args) elif J.tag == sTag: lib.ElForsythe_s(*args) elif J.tag == dTag: lib.ElForsythe_d(*args) elif J.tag == cTag: lib.ElForsythe_c(*args) elif J.tag == zTag: lib.ElForsythe_z(*args) else: DataExcept() elif type(J) is DistMatrix: if J.tag == iTag: lib.ElForsytheDist_i(*args) elif J.tag == sTag: lib.ElForsytheDist_s(*args) elif J.tag == dTag: lib.ElForsytheDist_d(*args) elif J.tag == cTag: lib.ElForsytheDist_c(*args) elif J.tag == zTag: lib.ElForsytheDist_z(*args) else: DataExcept() else: TypeExcept() # Fox-Li # ------ lib.ElFoxLi_c.argtypes = \ lib.ElFoxLiDist_c.argtypes = \ 
[c_void_p,iType,sType] lib.ElFoxLi_z.argtypes = \ lib.ElFoxLiDist_z.argtypes = \ [c_void_p,iType,dType] def FoxLi(A,n,omega=48.): args = [A.obj,n,omega] if type(A) is Matrix: if A.tag == cTag: lib.ElFoxLi_c(*args) elif A.tag == zTag: lib.ElFoxLi_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElFoxLiDist_c(*args) elif A.tag == zTag: lib.ElFoxLiDist_z(*args) else: DataExcept() else: TypeExcept() # Fourier # ------- lib.ElFourier_c.argtypes = \ lib.ElFourier_z.argtypes = \ lib.ElFourierDist_c.argtypes = \ lib.ElFourierDist_z.argtypes = \ [c_void_p,iType] def Fourier(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == cTag: lib.ElFourier_c(*args) elif A.tag == zTag: lib.ElFourier_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElFourierDist_c(*args) elif A.tag == zTag: lib.ElFourierDist_z(*args) else: DataExcept() else: TypeExcept() # Fourier-Identity # ---------------- lib.ElFourierIdentity_c.argtypes = \ lib.ElFourierIdentity_z.argtypes = \ lib.ElFourierIdentityDist_c.argtypes = \ lib.ElFourierIdentityDist_z.argtypes = \ [c_void_p,iType] def FourierIdentity(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == cTag: lib.ElFourierIdentity_c(*args) elif A.tag == zTag: lib.ElFourierIdentity_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElFourierIdentityDist_c(*args) elif A.tag == zTag: lib.ElFourierIdentityDist_z(*args) else: DataExcept() else: TypeExcept() # GCD matrix # ---------- lib.ElGCDMatrix_i.argtypes = \ lib.ElGCDMatrix_s.argtypes = \ lib.ElGCDMatrix_d.argtypes = \ lib.ElGCDMatrix_c.argtypes = \ lib.ElGCDMatrix_z.argtypes = \ lib.ElGCDMatrixDist_i.argtypes = \ lib.ElGCDMatrixDist_s.argtypes = \ lib.ElGCDMatrixDist_d.argtypes = \ lib.ElGCDMatrixDist_c.argtypes = \ lib.ElGCDMatrixDist_z.argtypes = \ [c_void_p,iType,iType] def GCDMatrix(G,m,n): args = [G.obj,m,n] if type(G) is Matrix: if G.tag == iTag: lib.ElGCDMatrix_i(*args) elif G.tag == sTag: 
lib.ElGCDMatrix_s(*args) elif G.tag == dTag: lib.ElGCDMatrix_d(*args) elif G.tag == cTag: lib.ElGCDMatrix_c(*args) elif G.tag == zTag: lib.ElGCDMatrix_z(*args) else: DataExcept() elif type(G) is DistMatrix: if G.tag == iTag: lib.ElGCDMatrixDist_i(*args) elif G.tag == sTag: lib.ElGCDMatrixDist_s(*args) elif G.tag == dTag: lib.ElGCDMatrixDist_d(*args) elif G.tag == cTag: lib.ElGCDMatrixDist_c(*args) elif G.tag == zTag: lib.ElGCDMatrixDist_z(*args) else: DataExcept() else: TypeExcept() # Gear matrix # ----------- lib.ElGear_i.argtypes = \ lib.ElGear_s.argtypes = \ lib.ElGear_d.argtypes = \ lib.ElGear_c.argtypes = \ lib.ElGear_z.argtypes = \ lib.ElGearDist_i.argtypes = \ lib.ElGearDist_s.argtypes = \ lib.ElGearDist_d.argtypes = \ lib.ElGearDist_c.argtypes = \ lib.ElGearDist_z.argtypes = \ [c_void_p,iType,iType,iType] def Gear(G,n,s,t): args = [G.obj,n,s,t] if type(G) is Matrix: if G.tag == iTag: lib.ElGear_i(*args) elif G.tag == sTag: lib.ElGear_s(*args) elif G.tag == dTag: lib.ElGear_d(*args) elif G.tag == cTag: lib.ElGear_c(*args) elif G.tag == zTag: lib.ElGear_z(*args) else: DataExcept() elif type(G) is DistMatrix: if G.tag == iTag: lib.ElGearDist_i(*args) elif G.tag == sTag: lib.ElGearDist_s(*args) elif G.tag == dTag: lib.ElGearDist_d(*args) elif G.tag == cTag: lib.ElGearDist_c(*args) elif G.tag == zTag: lib.ElGearDist_z(*args) else: DataExcept() else: TypeExcept() # GEPP Growth # ----------- lib.ElGEPPGrowth_s.argtypes = \ lib.ElGEPPGrowth_d.argtypes = \ lib.ElGEPPGrowth_c.argtypes = \ lib.ElGEPPGrowth_z.argtypes = \ lib.ElGEPPGrowthDist_s.argtypes = \ lib.ElGEPPGrowthDist_d.argtypes = \ lib.ElGEPPGrowthDist_c.argtypes = \ lib.ElGEPPGrowthDist_z.argtypes = \ [c_void_p,iType] def GEPPGrowth(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElGEPPGrowth_s(*args) elif A.tag == dTag: lib.ElGEPPGrowth_d(*args) elif A.tag == cTag: lib.ElGEPPGrowth_c(*args) elif A.tag == zTag: lib.ElGEPPGrowth_z(*args) else: DataExcept() elif type(A) is DistMatrix: if 
A.tag == sTag: lib.ElGEPPGrowthDist_s(*args) elif A.tag == dTag: lib.ElGEPPGrowthDist_d(*args) elif A.tag == cTag: lib.ElGEPPGrowthDist_c(*args) elif A.tag == zTag: lib.ElGEPPGrowthDist_z(*args) else: DataExcept() else: TypeExcept() # Golub/Klema/Stewart # ------------------- lib.ElGKS_s.argtypes = \ lib.ElGKS_d.argtypes = \ lib.ElGKS_c.argtypes = \ lib.ElGKS_z.argtypes = \ lib.ElGKSDist_s.argtypes = \ lib.ElGKSDist_d.argtypes = \ lib.ElGKSDist_c.argtypes = \ lib.ElGKSDist_z.argtypes = \ [c_void_p,iType] def GKS(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElGKS_s(*args) elif A.tag == dTag: lib.ElGKS_d(*args) elif A.tag == cTag: lib.ElGKS_c(*args) elif A.tag == zTag: lib.ElGKS_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElGKSDist_s(*args) elif A.tag == dTag: lib.ElGKSDist_d(*args) elif A.tag == cTag: lib.ElGKSDist_c(*args) elif A.tag == zTag: lib.ElGKSDist_z(*args) else: DataExcept() else: TypeExcept() # Grcar # ----- lib.ElGrcar_i.argtypes = \ lib.ElGrcar_s.argtypes = \ lib.ElGrcar_d.argtypes = \ lib.ElGrcar_c.argtypes = \ lib.ElGrcar_z.argtypes = \ lib.ElGrcarDist_i.argtypes = \ lib.ElGrcarDist_s.argtypes = \ lib.ElGrcarDist_d.argtypes = \ lib.ElGrcarDist_c.argtypes = \ lib.ElGrcarDist_z.argtypes = \ [c_void_p,iType,iType] def Grcar(A,n,k=3): args = [A.obj,n,k] if type(A) is Matrix: if A.tag == iTag: lib.ElGrcar_i(*args) elif A.tag == sTag: lib.ElGrcar_s(*args) elif A.tag == dTag: lib.ElGrcar_d(*args) elif A.tag == cTag: lib.ElGrcar_c(*args) elif A.tag == zTag: lib.ElGrcar_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElGrcarDist_i(*args) elif A.tag == sTag: lib.ElGrcarDist_s(*args) elif A.tag == dTag: lib.ElGrcarDist_d(*args) elif A.tag == cTag: lib.ElGrcarDist_c(*args) elif A.tag == zTag: lib.ElGrcarDist_z(*args) else: DataExcept() else: TypeExcept() # Haar # ---- lib.ElHaar_s.argtypes = \ lib.ElHaar_d.argtypes = \ lib.ElHaar_c.argtypes = \ lib.ElHaar_z.argtypes = \ 
lib.ElHaarDist_s.argtypes = \ lib.ElHaarDist_d.argtypes = \ lib.ElHaarDist_c.argtypes = \ lib.ElHaarDist_z.argtypes = \ [c_void_p,iType] def Haar(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElHaar_s(*args) elif A.tag == dTag: lib.ElHaar_d(*args) elif A.tag == cTag: lib.ElHaar_c(*args) elif A.tag == zTag: lib.ElHaar_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElHaarDist_s(*args) elif A.tag == dTag: lib.ElHaarDist_d(*args) elif A.tag == cTag: lib.ElHaarDist_c(*args) elif A.tag == zTag: lib.ElHaarDist_z(*args) else: DataExcept() else: TypeExcept() lib.ElImplicitHaar_s.argtypes = \ lib.ElImplicitHaar_d.argtypes = \ lib.ElImplicitHaar_c.argtypes = \ lib.ElImplicitHaar_z.argtypes = \ lib.ElImplicitHaarDist_s.argtypes = \ lib.ElImplicitHaarDist_d.argtypes = \ lib.ElImplicitHaarDist_c.argtypes = \ lib.ElImplicitHaarDist_z.argtypes = \ [c_void_p,c_void_p,c_void_p,iType] def ImplicitHaar(A,n): if type(A) is Matrix: t = Matrix(A.tag) d = Matrix(Base(A.tag)) args = [A.obj,t.obj,d.obj,n] if A.tag == sTag: lib.ElImplicitHaar_s(*args) elif A.tag == dTag: lib.ElImplicitHaar_d(*args) elif A.tag == cTag: lib.ElImplicitHaar_c(*args) elif A.tag == zTag: lib.ElImplicitHaar_z(*args) else: DataExcept() return t, d elif type(A) is DistMatrix: t = DistMatrix(A.tag,MC,STAR,A.Grid()) d = DistMatrix(Base(A.tag),MC,STAR,A.Grid()) args = [A.obj,t.obj,d.obj,n] if A.tag == sTag: lib.ElImplicitHaarDist_s(*args) elif A.tag == dTag: lib.ElImplicitHaarDist_d(*args) elif A.tag == cTag: lib.ElImplicitHaarDist_c(*args) elif A.tag == zTag: lib.ElImplicitHaarDist_z(*args) else: DataExcept() return t, d else: TypeExcept() # Hankel # ------ lib.ElHankel_i.argtypes = \ lib.ElHankelDist_i.argtypes = \ [c_void_p,iType,iType,iType,POINTER(iType)] lib.ElHankel_s.argtypes = \ lib.ElHankelDist_s.argtypes = \ [c_void_p,iType,iType,iType,POINTER(sType)] lib.ElHankel_d.argtypes = \ lib.ElHankelDist_d.argtypes = \ [c_void_p,iType,iType,iType,POINTER(dType)] 
lib.ElHankel_c.argtypes = \ lib.ElHankelDist_c.argtypes = \ [c_void_p,iType,iType,iType,POINTER(cType)] lib.ElHankel_z.argtypes = \ lib.ElHankelDist_z.argtypes = \ [c_void_p,iType,iType,iType,POINTER(zType)] def Hankel(A,m,n,a): aLen = len(a) aBuf = (TagToType(A.tag)*aLen)(*a) args = [A.obj,m,n,aLen,aBuf] if type(A) is Matrix: if A.tag == iTag: lib.ElHankel_i(*args) elif A.tag == sTag: lib.ElHankel_s(*args) elif A.tag == dTag: lib.ElHankel_d(*args) elif A.tag == cTag: lib.ElHankel_c(*args) elif A.tag == zTag: lib.ElHankel_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElHankelDist_i(*args) elif A.tag == sTag: lib.ElHankelDist_s(*args) elif A.tag == dTag: lib.ElHankelDist_d(*args) elif A.tag == cTag: lib.ElHankelDist_c(*args) elif A.tag == zTag: lib.ElHankelDist_z(*args) else: DataExcept() else: TypeExcept() # Hanowa # ------ lib.ElHanowa_i.argtypes = \ lib.ElHanowaDist_i.argtypes = \ [c_void_p,iType,iType] lib.ElHanowa_s.argtypes = \ lib.ElHanowaDist_s.argtypes = \ [c_void_p,iType,sType] lib.ElHanowa_d.argtypes = \ lib.ElHanowaDist_d.argtypes = \ [c_void_p,iType,dType] lib.ElHanowa_c.argtypes = \ lib.ElHanowaDist_c.argtypes = \ [c_void_p,iType,cType] lib.ElHanowa_z.argtypes = \ lib.ElHanowaDist_z.argtypes = \ [c_void_p,iType,zType] def Hanowa(A,n,mu): args = [A.obj,n,mu] if type(A) is Matrix: if A.tag == iTag: lib.ElHanowa_i(*args) elif A.tag == sTag: lib.ElHanowa_s(*args) elif A.tag == dTag: lib.ElHanowa_d(*args) elif A.tag == cTag: lib.ElHanowa_c(*args) elif A.tag == zTag: lib.ElHanowa_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElHanowaDist_i(*args) elif A.tag == sTag: lib.ElHanowaDist_s(*args) elif A.tag == dTag: lib.ElHanowaDist_d(*args) elif A.tag == cTag: lib.ElHanowaDist_c(*args) elif A.tag == zTag: lib.ElHanowaDist_z(*args) else: DataExcept() else: TypeExcept() # Hatano-Nelson # ------------- lib.ElHatanoNelson_s.argtypes = \ lib.ElHatanoNelsonDist_s.argtypes = \ 
[c_void_p,iType,sType,sType,sType,bType] lib.ElHatanoNelson_d.argtypes = \ lib.ElHatanoNelsonDist_d.argtypes = \ [c_void_p,iType,dType,dType,dType,bType] lib.ElHatanoNelson_c.argtypes = \ lib.ElHatanoNelsonDist_c.argtypes = \ [c_void_p,iType,cType,sType,cType,bType] lib.ElHatanoNelson_z.argtypes = \ lib.ElHatanoNelsonDist_z.argtypes = \ [c_void_p,iType,zType,dType,zType,bType] def HatanoNelson(A,n,center,radius,g,periodic=True): args = [A.obj,n,center,radius,g,periodic] if type(A) is Matrix: if A.tag == sTag: lib.ElHatanoNelson_s(*args) elif A.tag == dTag: lib.ElHatanoNelson_d(*args) elif A.tag == cTag: lib.ElHatanoNelson_c(*args) elif A.tag == zTag: lib.ElHatanoNelson_z(*args) else: DataExcept() elif tyep(A) is DistMatrix: if A.tag == sTag: lib.ElHatanoNelsonDist_s(*args) elif A.tag == dTag: lib.ElHatanoNelsonDist_d(*args) elif A.tag == cTag: lib.ElHatanoNelsonDist_c(*args) elif A.tag == zTag: lib.ElHatanoNelsonDist_z(*args) else: DataExcept() else: TypeExcept() # Helmholtz # --------- lib.ElHelmholtz1D_s.argtypes = \ lib.ElHelmholtz1DDist_s.argtypes = \ lib.ElHelmholtz1DSparse_s.argtypes = \ lib.ElHelmholtz1DDistSparse_s.argtypes = \ [c_void_p,iType,sType] lib.ElHelmholtz1D_d.argtypes = \ lib.ElHelmholtz1DDist_d.argtypes = \ lib.ElHelmholtz1DSparse_d.argtypes = \ lib.ElHelmholtz1DDistSparse_d.argtypes = \ [c_void_p,iType,dType] lib.ElHelmholtz1D_c.argtypes = \ lib.ElHelmholtz1DDist_c.argtypes = \ lib.ElHelmholtz1DSparse_c.argtypes = \ lib.ElHelmholtz1DDistSparse_c.argtypes = \ [c_void_p,iType,cType] lib.ElHelmholtz1D_z.argtypes = \ lib.ElHelmholtz1DDist_z.argtypes = \ lib.ElHelmholtz1DSparse_z.argtypes = \ lib.ElHelmholtz1DDistSparse_z.argtypes = \ [c_void_p,iType,zType] def Helmholtz1D(H,nx,shift): args = [H.obj,nx,shift] if type(A) is Matrix: if A.tag == sTag: lib.ElHelmholtz1D_s(*args) elif A.tag == dTag: lib.ElHelmholtz1D_d(*args) elif A.tag == cTag: lib.ElHelmholtz1D_c(*args) elif A.tag == zTag: lib.ElHelmholtz1D_z(*args) else: DataExcept() elif type(A) is 
DistMatrix: if A.tag == sTag: lib.ElHelmholtz1DDist_s(*args) elif A.tag == dTag: lib.ElHelmholtz1DDist_d(*args) elif A.tag == cTag: lib.ElHelmholtz1DDist_c(*args) elif A.tag == zTag: lib.ElHelmholtz1DDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == sTag: lib.ElHelmholtz1DSparse_s(*args) elif A.tag == dTag: lib.ElHelmholtz1DSparse_d(*args) elif A.tag == cTag: lib.ElHelmholtz1DSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtz1DSparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == sTag: lib.ElHelmholtz1DDistSparse_s(*args) elif A.tag == dTag: lib.ElHelmholtz1DDistSparse_d(*args) elif A.tag == cTag: lib.ElHelmholtz1DDistSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtz1DDistSparse_z(*args) else: DataExcept() else: TypeExcept() lib.ElHelmholtz2D_s.argtypes = \ lib.ElHelmholtz2DDist_s.argtypes = \ lib.ElHelmholtz2DSparse_s.argtypes = \ lib.ElHelmholtz2DDistSparse_s.argtypes = \ [c_void_p,iType,iType,sType] lib.ElHelmholtz2D_d.argtypes = \ lib.ElHelmholtz2DDist_d.argtypes = \ lib.ElHelmholtz2DSparse_d.argtypes = \ lib.ElHelmholtz2DDistSparse_d.argtypes = \ [c_void_p,iType,iType,dType] lib.ElHelmholtz2D_c.argtypes = \ lib.ElHelmholtz2DDist_c.argtypes = \ lib.ElHelmholtz2DSparse_c.argtypes = \ lib.ElHelmholtz2DDistSparse_c.argtypes = \ [c_void_p,iType,iType,cType] lib.ElHelmholtz2D_z.argtypes = \ lib.ElHelmholtz2DDist_z.argtypes = \ lib.ElHelmholtz2DSparse_z.argtypes = \ lib.ElHelmholtz2DDistSparse_z.argtypes = \ [c_void_p,iType,iType,zType] def Helmholtz2D(H,nx,ny,shift): args = [H.obj,nx,ny,shift] if type(A) is Matrix: if A.tag == sTag: lib.ElHelmholtz2D_s(*args) elif A.tag == dTag: lib.ElHelmholtz2D_d(*args) elif A.tag == cTag: lib.ElHelmholtz2D_c(*args) elif A.tag == zTag: lib.ElHelmholtz2D_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElHelmholtz2DDist_s(*args) elif A.tag == dTag: lib.ElHelmholtz2DDist_d(*args) elif A.tag == cTag: lib.ElHelmholtz2DDist_c(*args) elif A.tag == zTag: 
lib.ElHelmholtz2DDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == sTag: lib.ElHelmholtz2DSparse_s(*args) elif A.tag == dTag: lib.ElHelmholtz2DSparse_d(*args) elif A.tag == cTag: lib.ElHelmholtz2DSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtz2DSparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == sTag: lib.ElHelmholtz2DSparse_s(*args) elif A.tag == dTag: lib.ElHelmholtz2DSparse_d(*args) elif A.tag == cTag: lib.ElHelmholtz2DSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtz2DSparse_z(*args) else: DataExcept() else: TypeExcept() lib.ElHelmholtz3D_s.argtypes = \ lib.ElHelmholtz3DDist_s.argtypes = \ [c_void_p,iType,iType,iType,sType] lib.ElHelmholtz3D_d.argtypes = \ lib.ElHelmholtz3DDist_d.argtypes = \ [c_void_p,iType,iType,iType,dType] lib.ElHelmholtz3D_c.argtypes = \ lib.ElHelmholtz3DDist_c.argtypes = \ [c_void_p,iType,iType,iType,cType] lib.ElHelmholtz3D_z.argtypes = \ lib.ElHelmholtz3DDist_z.argtypes = \ [c_void_p,iType,iType,iType,zType] def Helmholtz3D(H,nx,ny,nz,shift): args = [H.obj,nx,ny,nz,shift] if type(A) is Matrix: if A.tag == sTag: lib.ElHelmholtz3D_s(*args) elif A.tag == dTag: lib.ElHelmholtz3D_d(*args) elif A.tag == cTag: lib.ElHelmholtz3D_c(*args) elif A.tag == zTag: lib.ElHelmholtz3D_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElHelmholtz3DDist_s(*args) elif A.tag == dTag: lib.ElHelmholtz3DDist_d(*args) elif A.tag == cTag: lib.ElHelmholtz3DDist_c(*args) elif A.tag == zTag: lib.ElHelmholtz3DDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == sTag: lib.ElHelmholtz3DSparse_s(*args) elif A.tag == dTag: lib.ElHelmholtz3DSparse_d(*args) elif A.tag == cTag: lib.ElHelmholtz3DSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtz3DSparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == sTag: lib.ElHelmholtz3DDistSparse_s(*args) elif A.tag == dTag: lib.ElHelmholtz3DDistSparse_d(*args) elif A.tag == cTag: 
lib.ElHelmholtz3DDistSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtz3DDistSparse_z(*args) else: DataExcept() else: TypeExcept() # Helmholtz with PML # ------------------ lib.ElHelmholtzPML1D_c.argtypes = \ lib.ElHelmholtzPML1DDist_c.argtypes = \ lib.ElHelmholtzPML1DSparse_c.argtypes = \ lib.ElHelmholtzPML1DDistSparse_c.argtypes = \ [c_void_p,iType,cType,iType,sType,sType] lib.ElHelmholtzPML1D_z.argtypes = \ lib.ElHelmholtzPML1DDist_z.argtypes = \ lib.ElHelmholtzPML1DSparse_z.argtypes = \ lib.ElHelmholtzPML1DDistSparse_z.argtypes = \ [c_void_p,iType,zType,iType,dType,dType] def HelmholtzPML1D(H,nx,omega,numPml,sigma,pmlExp): args = [H.obj,nx,omega,numPml,sigma,pmlExp] if type(A) is Matrix: if A.tag == cTag: lib.ElHelmholtzPML1D_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML1D_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElHelmholtzPML1DDist_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML1DDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == cTag: lib.ElHelmholtzPML1DSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML1DSparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == cTag: lib.ElHelmholtzPML1DDistSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML1DDistSparse_z(*args) else: DataExcept() else: TypeExcept() lib.ElHelmholtzPML2D_c.argtypes = \ lib.ElHelmholtzPML2DDist_c.argtypes = \ lib.ElHelmholtzPML2DSparse_c.argtypes = \ lib.ElHelmholtzPML2DDistSparse_c.argtypes = \ [c_void_p,iType,iType,cType,iType,sType,sType] lib.ElHelmholtzPML2D_z.argtypes = \ lib.ElHelmholtzPML2DDist_z.argtypes = \ lib.ElHelmholtzPML2DSparse_z.argtypes = \ lib.ElHelmholtzPML2DDistSparse_z.argtypes = \ [c_void_p,iType,iType,zType,iType,dType,dType] def HelmholtzPML2D(H,nx,ny,omega,numPml,sigma,pmlExp): args = [H.obj,nx,ny,omega,numPml,sigma,pmlExp] if type(A) is Matrix: if A.tag == cTag: lib.ElHelmholtzPML2D_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML2D_z(*args) else: DataExcept() elif type(A) is 
DistMatrix: if A.tag == cTag: lib.ElHelmholtzPML2DDist_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML2DDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == cTag: lib.ElHelmholtzPML2DSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML2DSparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == cTag: lib.ElHelmholtzPML2DDistSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML2DDistSparse_z(*args) else: DataExcept() else: TypeExcept() lib.ElHelmholtzPML3D_c.argtypes = \ lib.ElHelmholtzPML3DDist_c.argtypes = \ lib.ElHelmholtzPML3DSparse_c.argtypes = \ lib.ElHelmholtzPML3DDistSparse_c.argtypes = \ [c_void_p,iType,iType,iType,cType,iType,sType,sType] lib.ElHelmholtzPML3D_z.argtypes = \ lib.ElHelmholtzPML3DDist_z.argtypes = \ lib.ElHelmholtzPML3DSparse_z.argtypes = \ lib.ElHelmholtzPML3DDistSparse_z.argtypes = \ [c_void_p,iType,iType,iType,zType,iType,dType,dType] def HelmholtzPML3D(H,nx,ny,nz,omega,numPml,sigma,pmlExp): args = [H.obj,nx,ny,nz,omega,numPml,sigma,pmlExp] if type(A) is Matrix: if A.tag == cTag: lib.ElHelmholtzPML3D_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML3D_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElHelmholtzPML3DDist_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML3DDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == cTag: lib.ElHelmholtzPML3DSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML3DSparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == cTag: lib.ElHelmholtzPML3DDistSparse_c(*args) elif A.tag == zTag: lib.ElHelmholtzPML3DDistSparse_z(*args) else: DataExcept() else: TypeExcept() # Hermitian from EVD # ------------------ lib.ElHermitianFromEVD_s.argtypes = \ lib.ElHermitianFromEVD_d.argtypes = \ lib.ElHermitianFromEVD_c.argtypes = \ lib.ElHermitianFromEVD_z.argtypes = \ lib.ElHermitianFromEVDDist_s.argtypes = \ lib.ElHermitianFromEVDDist_d.argtypes = \ lib.ElHermitianFromEVDDist_c.argtypes = \ 
lib.ElHermitianFromEVDDist_z.argtypes = \ [c_uint,c_void_p,c_void_p,c_void_p] def HermitianFromEVD(uplo,A,w,Z): if type(A) is not type(w) or type(w) is not type(Z): raise Exception('Types of {A,w,Z} must match') if A.tag != Z.tag: raise Exception('Datatypes of A and Z must match') if w.tag != Base(Z.tag): raise Exception('w must be of the base datatype of Z') args = [uplo,A.obj,w.obj,Z.obj] if type(Z) is Matrix: if Z.tag == sTag: lib.ElHermitianFromEVD_s(*args) elif Z.tag == dTag: lib.ElHermitianFromEVD_d(*args) elif Z.tag == cTag: lib.ElHermitianFromEVD_c(*args) elif Z.tag == zTag: lib.ElHermitianFromEVD_z(*args) else: DataExcept() elif type(Z) is DistMatrix: if Z.tag == sTag: lib.ElHermitianFromEVDDist_s(*args) elif Z.tag == dTag: lib.ElHermitianFromEVDDist_d(*args) elif Z.tag == cTag: lib.ElHermitianFromEVDDist_c(*args) elif Z.tag == zTag: lib.ElHermitianFromEVDDist_z(*args) else: DataExcept() else: TypeExcept() # Hermitian uniform spectrum # -------------------------- lib.ElHermitianUniformSpectrum_s.argtypes = \ lib.ElHermitianUniformSpectrum_c.argtypes = \ lib.ElHermitianUniformSpectrumDist_s.argtypes = \ lib.ElHermitianUniformSpectrumDist_c.argtypes = \ [c_void_p,iType,sType,sType] lib.ElHermitianUniformSpectrum_d.argtypes = \ lib.ElHermitianUniformSpectrum_z.argtypes = \ lib.ElHermitianUniformSpectrumDist_d.argtypes = \ lib.ElHermitianUniformSpectrumDist_z.argtypes = \ [c_void_p,iType,dType,dType] def HermitianUniformSpectrum(A,n,lower=0,upper=1): args = [A.obj,n,lower,upper] if type(A) is Matrix: if A.tag == sTag: lib.ElHermitianUniformSpectrum_s(*args) elif A.tag == dTag: lib.ElHermitianUniformSpectrum_d(*args) elif A.tag == cTag: lib.ElHermitianUniformSpectrum_c(*args) elif A.tag == zTag: lib.ElHermitianUniformSpectrum_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElHermitianUniformSpectrumDist_s(*args) elif A.tag == dTag: lib.ElHermitianUniformSpectrumDist_d(*args) elif A.tag == cTag: 
lib.ElHermitianUniformSpectrumDist_c(*args) elif A.tag == zTag: lib.ElHermitianUniformSpectrumDist_z(*args) else: DataExcept() else: TypeExcept() # Hilbert # ------- lib.ElHilbert_s.argtypes = \ lib.ElHilbert_d.argtypes = \ lib.ElHilbert_c.argtypes = \ lib.ElHilbert_z.argtypes = \ lib.ElHilbertDist_s.argtypes = \ lib.ElHilbertDist_d.argtypes = \ lib.ElHilbertDist_c.argtypes = \ lib.ElHilbertDist_z.argtypes = \ [c_void_p,iType] def Hilbert(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElHilbert_s(*args) elif A.tag == dTag: lib.ElHilbert_d(*args) elif A.tag == cTag: lib.ElHilbert_c(*args) elif A.tag == zTag: lib.ElHilbert_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElHilbertDist_s(*args) elif A.tag == dTag: lib.ElHilbertDist_d(*args) elif A.tag == cTag: lib.ElHilbertDist_c(*args) elif A.tag == zTag: lib.ElHilbertDist_z(*args) else: DataExcept() else: TypeExcept() # Identity # -------- lib.ElIdentity_i.argtypes = \ lib.ElIdentity_s.argtypes = \ lib.ElIdentity_d.argtypes = \ lib.ElIdentity_c.argtypes = \ lib.ElIdentity_z.argtypes = \ lib.ElIdentityDist_i.argtypes = \ lib.ElIdentityDist_s.argtypes = \ lib.ElIdentityDist_d.argtypes = \ lib.ElIdentityDist_c.argtypes = \ lib.ElIdentityDist_z.argtypes = \ lib.ElIdentitySparse_i.argtypes = \ lib.ElIdentitySparse_s.argtypes = \ lib.ElIdentitySparse_d.argtypes = \ lib.ElIdentitySparse_c.argtypes = \ lib.ElIdentitySparse_z.argtypes = \ lib.ElIdentityDistSparse_i.argtypes = \ lib.ElIdentityDistSparse_s.argtypes = \ lib.ElIdentityDistSparse_d.argtypes = \ lib.ElIdentityDistSparse_c.argtypes = \ lib.ElIdentityDistSparse_z.argtypes = \ [c_void_p,iType,iType] def Identity(A,m,n): args = [A.obj,m,n] if type(A) is Matrix: if A.tag == iTag: lib.ElIdentity_i(*args) elif A.tag == sTag: lib.ElIdentity_s(*args) elif A.tag == dTag: lib.ElIdentity_d(*args) elif A.tag == cTag: lib.ElIdentity_c(*args) elif A.tag == zTag: lib.ElIdentity_z(*args) else: DataExcept() elif type(A) is 
DistMatrix: if A.tag == iTag: lib.ElIdentityDist_i(*args) elif A.tag == sTag: lib.ElIdentityDist_s(*args) elif A.tag == dTag: lib.ElIdentityDist_d(*args) elif A.tag == cTag: lib.ElIdentityDist_c(*args) elif A.tag == zTag: lib.ElIdentityDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == iTag: lib.ElIdentitySparse_i(*args) elif A.tag == sTag: lib.ElIdentitySparse_s(*args) elif A.tag == dTag: lib.ElIdentitySparse_d(*args) elif A.tag == cTag: lib.ElIdentitySparse_c(*args) elif A.tag == zTag: lib.ElIdentitySparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == iTag: lib.ElIdentityDistSparse_i(*args) elif A.tag == sTag: lib.ElIdentityDistSparse_s(*args) elif A.tag == dTag: lib.ElIdentityDistSparse_d(*args) elif A.tag == cTag: lib.ElIdentityDistSparse_c(*args) elif A.tag == zTag: lib.ElIdentityDistSparse_z(*args) else: DataExcept() else: TypeExcept() # Jordan # ------ lib.ElJordan_i.argtypes = \ lib.ElJordanDist_i.argtypes = \ [c_void_p,iType,iType] lib.ElJordan_s.argtypes = \ lib.ElJordanDist_s.argtypes = \ [c_void_p,iType,sType] lib.ElJordan_d.argtypes = \ lib.ElJordanDist_d.argtypes = \ [c_void_p,iType,dType] lib.ElJordan_c.argtypes = \ lib.ElJordanDist_c.argtypes = \ [c_void_p,iType,cType] lib.ElJordan_z.argtypes = \ lib.ElJordanDist_z.argtypes = \ [c_void_p,iType,zType] def Jordan(J,n,lambPre): lamb = TagToType(J.tag)(lambPre) args = [J.obj,n,lamb] if type(J) is Matrix: if J.tag == iTag: lib.ElJordan_i(*args) elif J.tag == sTag: lib.ElJordan_s(*args) elif J.tag == dTag: lib.ElJordan_d(*args) elif J.tag == cTag: lib.ElJordan_c(*args) elif J.tag == zTag: lib.ElJordan_z(*args) else: DataExcept() elif type(J) is DistMatrix: if J.tag == iTag: lib.ElJordanDist_i(*args) elif J.tag == sTag: lib.ElJordanDist_s(*args) elif J.tag == dTag: lib.ElJordanDist_d(*args) elif J.tag == cTag: lib.ElJordanDist_c(*args) elif J.tag == zTag: lib.ElJordanDist_z(*args) else: DataExcept() else: TypeExcept() # Jordan-Cholesky # --------------- 
lib.ElJordanCholesky_s.argtypes = \ lib.ElJordanCholesky_d.argtypes = \ lib.ElJordanCholesky_c.argtypes = \ lib.ElJordanCholesky_z.argtypes = \ lib.ElJordanCholeskyDist_s.argtypes = \ lib.ElJordanCholeskyDist_d.argtypes = \ lib.ElJordanCholeskyDist_c.argtypes = \ lib.ElJordanCholeskyDist_z.argtypes = \ lib.ElJordanCholeskySparse_s.argtypes = \ lib.ElJordanCholeskySparse_d.argtypes = \ lib.ElJordanCholeskySparse_c.argtypes = \ lib.ElJordanCholeskySparse_z.argtypes = \ lib.ElJordanCholeskyDistSparse_s.argtypes = \ lib.ElJordanCholeskyDistSparse_d.argtypes = \ lib.ElJordanCholeskyDistSparse_c.argtypes = \ lib.ElJordanCholeskyDistSparse_z.argtypes = \ [c_void_p,iType] def JordanCholesky(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElJordanCholesky_s(*args) elif A.tag == dTag: lib.ElJordanCholesky_d(*args) elif A.tag == cTag: lib.ElJordanCholesky_c(*args) elif A.tag == zTag: lib.ElJordanCholesky_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElJordanCholeskyDist_s(*args) elif A.tag == dTag: lib.ElJordanCholeskyDist_d(*args) elif A.tag == cTag: lib.ElJordanCholeskyDist_c(*args) elif A.tag == zTag: lib.ElJordanCholeskyDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == sTag: lib.ElJordanCholeskySparse_s(*args) elif A.tag == dTag: lib.ElJordanCholeskySparse_d(*args) elif A.tag == cTag: lib.ElJordanCholeskySparse_c(*args) elif A.tag == zTag: lib.ElJordanCholeskySparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == sTag: lib.ElJordanCholeskyDistSparse_s(*args) elif A.tag == dTag: lib.ElJordanCholeskyDistSparse_d(*args) elif A.tag == cTag: lib.ElJordanCholeskyDistSparse_c(*args) elif A.tag == zTag: lib.ElJordanCholeskyDistSparse_z(*args) else: DataExcept() else: TypeExcept() # Kahan # ----- lib.ElKahan_s.argtypes = \ lib.ElKahanDist_s.argtypes = \ [c_void_p,iType,sType] lib.ElKahan_d.argtypes = \ lib.ElKahanDist_d.argtypes = \ [c_void_p,iType,dType] lib.ElKahan_c.argtypes = 
\ lib.ElKahanDist_c.argtypes = \ [c_void_p,iType,cType] lib.ElKahan_z.argtypes = \ lib.ElKahanDist_z.argtypes = \ [c_void_p,iType,zType] def Kahan(A,n,phi): args = [A.obj,n,phi] if type(A) is Matrix: if A.tag == sTag: lib.ElKahan_s(*args) elif A.tag == dTag: lib.ElKahan_d(*args) elif A.tag == cTag: lib.ElKahan_c(*args) elif A.tag == zTag: lib.ElKahan_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElKahanDist_s(*args) elif A.tag == dTag: lib.ElKahanDist_d(*args) elif A.tag == cTag: lib.ElKahanDist_c(*args) elif A.tag == zTag: lib.ElKahanDist_z(*args) else: DataExcept() else: TypeExcept() # KMS # --- lib.ElKMS_i.argtypes = \ lib.ElKMSDist_i.argtypes = \ [c_void_p,iType,iType] lib.ElKMS_s.argtypes = \ lib.ElKMSDist_s.argtypes = \ [c_void_p,iType,sType] lib.ElKMS_d.argtypes = \ lib.ElKMSDist_d.argtypes = \ [c_void_p,iType,dType] lib.ElKMS_c.argtypes = \ lib.ElKMSDist_c.argtypes = \ [c_void_p,iType,cType] lib.ElKMS_z.argtypes = \ lib.ElKMSDist_z.argtypes = \ [c_void_p,iType,zType] def KMS(K,n,rho): args = [K.obj,n,rho] if type(K) is Matrix: if K.tag == iTag: lib.ElKMS_i(*args) elif K.tag == sTag: lib.ElKMS_s(*args) elif K.tag == dTag: lib.ElKMS_d(*args) elif K.tag == cTag: lib.ElKMS_c(*args) elif K.tag == zTag: lib.ElKMS_z(*args) else: DataExcept() elif type(K) is DistMatrix: if K.tag == iTag: lib.ElKMSDist_i(*args) elif K.tag == sTag: lib.ElKMSDist_s(*args) elif K.tag == dTag: lib.ElKMSDist_d(*args) elif K.tag == cTag: lib.ElKMSDist_c(*args) elif K.tag == zTag: lib.ElKMSDist_z(*args) else: DataExcept() else: TypeExcept() # Laplacian # --------- lib.ElLaplacian1D_s.argtypes = \ lib.ElLaplacian1D_d.argtypes = \ lib.ElLaplacian1D_c.argtypes = \ lib.ElLaplacian1D_z.argtypes = \ lib.ElLaplacian1DDist_s.argtypes = \ lib.ElLaplacian1DDist_d.argtypes = \ lib.ElLaplacian1DDist_c.argtypes = \ lib.ElLaplacian1DDist_z.argtypes = \ lib.ElLaplacian1DSparse_s.argtypes = \ lib.ElLaplacian1DSparse_d.argtypes = \ lib.ElLaplacian1DSparse_c.argtypes = \ 
lib.ElLaplacian1DSparse_z.argtypes = \ lib.ElLaplacian1DDistSparse_s.argtypes = \ lib.ElLaplacian1DDistSparse_d.argtypes = \ lib.ElLaplacian1DDistSparse_c.argtypes = \ lib.ElLaplacian1DDistSparse_z.argtypes = \ [c_void_p,iType] def Laplacian1D(L,nx): args = [L.obj,nx] if type(L) is Matrix: if L.tag == sTag: lib.ElLaplacian1D_s(*args) elif L.tag == dTag: lib.ElLaplacian1D_d(*args) elif L.tag == cTag: lib.ElLaplacian1D_c(*args) elif L.tag == zTag: lib.ElLaplacian1D_z(*args) else: DataExcept() elif type(L) is DistMatrix: if L.tag == sTag: lib.ElLaplacian1DDist_s(*args) elif L.tag == dTag: lib.ElLaplacian1DDist_d(*args) elif L.tag == cTag: lib.ElLaplacian1DDist_c(*args) elif L.tag == zTag: lib.ElLaplacian1DDist_z(*args) else: DataExcept() elif type(L) is SparseMatrix: if L.tag == sTag: lib.ElLaplacian1DSparse_s(*args) elif L.tag == dTag: lib.ElLaplacian1DSparse_d(*args) elif L.tag == cTag: lib.ElLaplacian1DSparse_c(*args) elif L.tag == zTag: lib.ElLaplacian1DSparse_z(*args) else: DataExcept() elif type(L) is DistSparseMatrix: if L.tag == sTag: lib.ElLaplacian1DDistSparse_s(*args) elif L.tag == dTag: lib.ElLaplacian1DDistSparse_d(*args) elif L.tag == cTag: lib.ElLaplacian1DDistSparse_c(*args) elif L.tag == zTag: lib.ElLaplacian1DDistSparse_z(*args) else: DataExcept() else: TypeExcept() # LEFT OFF HERE (TODO: Add sparse wrappers) lib.ElLaplacian2D_s.argtypes = \ lib.ElLaplacian2D_d.argtypes = \ lib.ElLaplacian2D_c.argtypes = \ lib.ElLaplacian2D_z.argtypes = \ lib.ElLaplacian2DDist_s.argtypes = \ lib.ElLaplacian2DDist_d.argtypes = \ lib.ElLaplacian2DDist_c.argtypes = \ lib.ElLaplacian2DDist_z.argtypes = \ [c_void_p,iType,iType] def Laplacian2D(L,nx,ny): args = [L.obj,nx,ny] if type(L) is Matrix: if L.tag == sTag: lib.ElLaplacian2D_s(*args) elif L.tag == dTag: lib.ElLaplacian2D_d(*args) elif L.tag == cTag: lib.ElLaplacian2D_c(*args) elif L.tag == zTag: lib.ElLaplacian2D_z(*args) else: DataExcept() elif type(L) is DistMatrix: if L.tag == sTag: lib.ElLaplacian2DDist_s(*args) 
elif L.tag == dTag: lib.ElLaplacian2DDist_d(*args) elif L.tag == cTag: lib.ElLaplacian2DDist_c(*args) elif L.tag == zTag: lib.ElLaplacian2DDist_z(*args) else: DataExcept() else: TypeExcept() lib.ElLaplacian3D_s.argtypes = \ lib.ElLaplacian3D_d.argtypes = \ lib.ElLaplacian3D_c.argtypes = \ lib.ElLaplacian3D_z.argtypes = \ lib.ElLaplacian3DDist_s.argtypes = \ lib.ElLaplacian3DDist_d.argtypes = \ lib.ElLaplacian3DDist_c.argtypes = \ lib.ElLaplacian3DDist_z.argtypes = \ [c_void_p,iType,iType,iType] def Laplacian3D(L,nx,ny,nz): args = [L.obj,nx,ny,nz] if type(L) is Matrix: if L.tag == sTag: lib.ElLaplacian3D_s(*args) elif L.tag == dTag: lib.ElLaplacian3D_d(*args) elif L.tag == cTag: lib.ElLaplacian3D_c(*args) elif L.tag == zTag: lib.ElLaplacian3D_z(*args) else: DataExcept() elif type(L) is DistMatrix: if L.tag == sTag: lib.ElLaplacian3DDist_s(*args) elif L.tag == dTag: lib.ElLaplacian3DDist_d(*args) elif L.tag == cTag: lib.ElLaplacian3DDist_c(*args) elif L.tag == zTag: lib.ElLaplacian3DDist_z(*args) else: DataExcept() else: TypeExcept() # Lauchli # ------- lib.ElLauchli_i.argtypes = \ lib.ElLauchliDist_i.argtypes = \ [c_void_p,iType,iType] lib.ElLauchli_s.argtypes = \ lib.ElLauchliDist_s.argtypes = \ [c_void_p,iType,sType] lib.ElLauchli_d.argtypes = \ lib.ElLauchliDist_d.argtypes = \ [c_void_p,iType,dType] lib.ElLauchli_c.argtypes = \ lib.ElLauchliDist_c.argtypes = \ [c_void_p,iType,cType] lib.ElLauchli_z.argtypes = \ lib.ElLauchliDist_z.argtypes = \ [c_void_p,iType,zType] def Lauchli(A,n,mu): args = [A.obj,n,mu] if type(A) is Matrix: if A.tag == iTag: lib.ElLauchli_i(*args) elif A.tag == sTag: lib.ElLauchli_s(*args) elif A.tag == dTag: lib.ElLauchli_d(*args) elif A.tag == cTag: lib.ElLauchli_c(*args) elif A.tag == zTag: lib.ElLauchli_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElLauchliDist_i(*args) elif A.tag == sTag: lib.ElLauchliDist_s(*args) elif A.tag == dTag: lib.ElLauchliDist_d(*args) elif A.tag == cTag: 
lib.ElLauchliDist_c(*args) elif A.tag == zTag: lib.ElLauchliDist_z(*args) else: DataExcept() else: TypeExcept() # Legendre # -------- lib.ElLegendre_s.argtypes = \ lib.ElLegendre_d.argtypes = \ lib.ElLegendre_c.argtypes = \ lib.ElLegendre_z.argtypes = \ lib.ElLegendreDist_s.argtypes = \ lib.ElLegendreDist_d.argtypes = \ lib.ElLegendreDist_c.argtypes = \ lib.ElLegendreDist_z.argtypes = \ [c_void_p,iType] def Legendre(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElLegendre_s(*args) elif A.tag == dTag: lib.ElLegendre_d(*args) elif A.tag == cTag: lib.ElLegendre_c(*args) elif A.tag == zTag: lib.ElLegendre_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElLegendreDist_s(*args) elif A.tag == dTag: lib.ElLegendreDist_d(*args) elif A.tag == cTag: lib.ElLegendreDist_c(*args) elif A.tag == zTag: lib.ElLegendreDist_z(*args) else: DataExcept() else: TypeExcept() # Lehmer # ------ lib.ElLehmer_s.argtypes = \ lib.ElLehmer_d.argtypes = \ lib.ElLehmer_c.argtypes = \ lib.ElLehmer_z.argtypes = \ lib.ElLehmerDist_s.argtypes = \ lib.ElLehmerDist_d.argtypes = \ lib.ElLehmerDist_c.argtypes = \ lib.ElLehmerDist_z.argtypes = \ [c_void_p,iType] def Lehmer(L,n): args = [L.obj,n] if type(L) is Matrix: if L.tag == sTag: lib.ElLehmer_s(*args) elif L.tag == dTag: lib.ElLehmer_d(*args) elif L.tag == cTag: lib.ElLehmer_c(*args) elif L.tag == zTag: lib.ElLehmer_z(*args) else: DataExcept() elif type(L) is DistMatrix: if L.tag == sTag: lib.ElLehmerDist_s(*args) elif L.tag == dTag: lib.ElLehmerDist_d(*args) elif L.tag == cTag: lib.ElLehmerDist_c(*args) elif L.tag == zTag: lib.ElLehmerDist_z(*args) else: DataExcept() else: TypeExcept() # Lotkin # ------ lib.ElLotkin_s.argtypes = \ lib.ElLotkin_d.argtypes = \ lib.ElLotkin_c.argtypes = \ lib.ElLotkin_z.argtypes = \ lib.ElLotkinDist_s.argtypes = \ lib.ElLotkinDist_d.argtypes = \ lib.ElLotkinDist_c.argtypes = \ lib.ElLotkinDist_z.argtypes = \ [c_void_p,iType] def Lotkin(A,n): args = [A.obj,n] if type(A) 
is Matrix: if A.tag == sTag: lib.ElLotkin_s(*args) elif A.tag == dTag: lib.ElLotkin_d(*args) elif A.tag == cTag: lib.ElLotkin_c(*args) elif A.tag == zTag: lib.ElLotkin_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElLotkinDist_s(*args) elif A.tag == dTag: lib.ElLotkinDist_d(*args) elif A.tag == cTag: lib.ElLotkinDist_c(*args) elif A.tag == zTag: lib.ElLotkinDist_z(*args) else: DataExcept() else: TypeExcept() # MinIJ # ----- lib.ElMinIJ_i.argtypes = \ lib.ElMinIJ_s.argtypes = \ lib.ElMinIJ_d.argtypes = \ lib.ElMinIJ_c.argtypes = \ lib.ElMinIJ_z.argtypes = \ lib.ElMinIJDist_i.argtypes = \ lib.ElMinIJDist_s.argtypes = \ lib.ElMinIJDist_d.argtypes = \ lib.ElMinIJDist_c.argtypes = \ lib.ElMinIJDist_z.argtypes = \ [c_void_p,iType] def MinIJ(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == iTag: lib.ElMinIJ_i(*args) elif A.tag == sTag: lib.ElMinIJ_s(*args) elif A.tag == dTag: lib.ElMinIJ_d(*args) elif A.tag == cTag: lib.ElMinIJ_c(*args) elif A.tag == zTag: lib.ElMinIJ_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElMinIJDist_i(*args) elif A.tag == sTag: lib.ElMinIJDist_s(*args) elif A.tag == dTag: lib.ElMinIJDist_d(*args) elif A.tag == cTag: lib.ElMinIJDist_c(*args) elif A.tag == zTag: lib.ElMinIJDist_z(*args) else: DataExcept() else: TypeExcept() # Normal from EVD # --------------- lib.ElNormalFromEVD_c.argtypes = \ lib.ElNormalFromEVD_z.argtypes = \ lib.ElNormalFromEVDDist_c.argtypes = \ lib.ElNormalFromEVDDist_z.argtypes = \ [c_void_p,c_void_p,c_void_p] def NormalFromEVD(A,w,Z): if type(A) is not type(w): raise Exception('Types of A and w must match') if type(A) is not type(Z): raise Exception('Types of A and Z must match') if Z.tag != A.tag: raise Exception('Datatypes of A and Z must match') if w.tag != Base(A.tag): raise Exception('Base datatype of A must match w') args = [A.obj,w.obj,Z.obj] if type(A) is Matrix: if A.tag == cTag: lib.ElNormalFromEVD_c(*args) elif A.tag == zTag: 
lib.ElNormalFromEVD_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElNormalFromEVDDist_c(*args) elif A.tag == zTag: lib.ElNormalFromEVDDist_z(*args) else: DataExcept() else: TypeExcept() # Ones # ---- lib.ElOnes_i.argtypes = \ lib.ElOnes_s.argtypes = \ lib.ElOnes_d.argtypes = \ lib.ElOnes_c.argtypes = \ lib.ElOnes_z.argtypes = \ lib.ElOnesDist_i.argtypes = \ lib.ElOnesDist_s.argtypes = \ lib.ElOnesDist_d.argtypes = \ lib.ElOnesDist_c.argtypes = \ lib.ElOnesDist_z.argtypes = \ lib.ElOnesDistMultiVec_i.argtypes = \ lib.ElOnesDistMultiVec_s.argtypes = \ lib.ElOnesDistMultiVec_d.argtypes = \ lib.ElOnesDistMultiVec_c.argtypes = \ lib.ElOnesDistMultiVec_z.argtypes = \ [c_void_p,iType,iType] def Ones(A,m,n): args = [A.obj,m,n] if type(A) is Matrix: if A.tag == iTag: lib.ElOnes_i(*args) elif A.tag == sTag: lib.ElOnes_s(*args) elif A.tag == dTag: lib.ElOnes_d(*args) elif A.tag == cTag: lib.ElOnes_c(*args) elif A.tag == zTag: lib.ElOnes_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElOnesDist_i(*args) elif A.tag == sTag: lib.ElOnesDist_s(*args) elif A.tag == dTag: lib.ElOnesDist_d(*args) elif A.tag == cTag: lib.ElOnesDist_c(*args) elif A.tag == zTag: lib.ElOnesDist_z(*args) else: DataExcept() elif type(A) is DistMultiVec: if A.tag == iTag: lib.ElOnesDistMultiVec_i(*args) elif A.tag == sTag: lib.ElOnesDistMultiVec_s(*args) elif A.tag == dTag: lib.ElOnesDistMultiVec_d(*args) elif A.tag == cTag: lib.ElOnesDistMultiVec_c(*args) elif A.tag == zTag: lib.ElOnesDistMultiVec_z(*args) else: DataExcept() else: TypeExcept() # 1-2-1 matrix # ------------ lib.ElOneTwoOne_i.argtypes = \ lib.ElOneTwoOne_s.argtypes = \ lib.ElOneTwoOne_d.argtypes = \ lib.ElOneTwoOne_c.argtypes = \ lib.ElOneTwoOne_z.argtypes = \ lib.ElOneTwoOneDist_i.argtypes = \ lib.ElOneTwoOneDist_s.argtypes = \ lib.ElOneTwoOneDist_d.argtypes = \ lib.ElOneTwoOneDist_c.argtypes = \ lib.ElOneTwoOneDist_z.argtypes = \ [c_void_p,iType] def OneTwoOne(A,n): args = 
[A.obj,n] if type(A) is Matrix: if A.tag == iTag: lib.ElOneTwoOne_i(*args) elif A.tag == sTag: lib.ElOneTwoOne_s(*args) elif A.tag == dTag: lib.ElOneTwoOne_d(*args) elif A.tag == cTag: lib.ElOneTwoOne_c(*args) elif A.tag == zTag: lib.ElOneTwoOne_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElOneTwoOneDist_i(*args) elif A.tag == sTag: lib.ElOneTwoOneDist_s(*args) elif A.tag == dTag: lib.ElOneTwoOneDist_d(*args) elif A.tag == cTag: lib.ElOneTwoOneDist_c(*args) elif A.tag == zTag: lib.ElOneTwoOneDist_z(*args) else: DataExcept() else: TypeExcept() # Parter # ------ lib.ElParter_s.argtypes = \ lib.ElParter_d.argtypes = \ lib.ElParter_c.argtypes = \ lib.ElParter_z.argtypes = \ lib.ElParterDist_s.argtypes = \ lib.ElParterDist_d.argtypes = \ lib.ElParterDist_c.argtypes = \ lib.ElParterDist_z.argtypes = \ [c_void_p,iType] def Parter(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElParter_s(*args) elif A.tag == dTag: lib.ElParter_d(*args) elif A.tag == cTag: lib.ElParter_c(*args) elif A.tag == zTag: lib.ElParter_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElParterDist_s(*args) elif A.tag == dTag: lib.ElParterDist_d(*args) elif A.tag == cTag: lib.ElParterDist_c(*args) elif A.tag == zTag: lib.ElParterDist_z(*args) else: DataExcept() else: TypeExcept() # Pei # --- lib.ElPei_s.argtypes = \ lib.ElPeiDist_s.argtypes = \ [c_void_p,iType,sType] lib.ElPei_d.argtypes = \ lib.ElPeiDist_d.argtypes = \ [c_void_p,iType,dType] lib.ElPei_c.argtypes = \ lib.ElPeiDist_c.argtypes = \ [c_void_p,iType,cType] lib.ElPei_z.argtypes = \ lib.ElPeiDist_z.argtypes = \ [c_void_p,iType,zType] def Pei(A,n,alpha): args = [A.obj,n,alpha] if type(A) is Matrix: if A.tag == sTag: lib.ElPei_s(*args) elif A.tag == dTag: lib.ElPei_d(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElPeiDist_s(*args) elif A.tag == dTag: lib.ElPeiDist_d(*args) else: DataExcept() else: TypeExcept() # Redheffer # 
--------- lib.ElRedheffer_i.argtypes = \ lib.ElRedheffer_s.argtypes = \ lib.ElRedheffer_d.argtypes = \ lib.ElRedheffer_c.argtypes = \ lib.ElRedheffer_z.argtypes = \ lib.ElRedhefferDist_i.argtypes = \ lib.ElRedhefferDist_s.argtypes = \ lib.ElRedhefferDist_d.argtypes = \ lib.ElRedhefferDist_c.argtypes = \ lib.ElRedhefferDist_z.argtypes = \ [c_void_p,iType] def Redheffer(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == iTag: lib.ElRedheffer_i(*args) elif A.tag == sTag: lib.ElRedheffer_s(*args) elif A.tag == dTag: lib.ElRedheffer_d(*args) elif A.tag == cTag: lib.ElRedheffer_c(*args) elif A.tag == zTag: lib.ElRedheffer_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElRedhefferDist_i(*args) elif A.tag == sTag: lib.ElRedhefferDist_s(*args) elif A.tag == dTag: lib.ElRedhefferDist_d(*args) elif A.tag == cTag: lib.ElRedhefferDist_c(*args) elif A.tag == zTag: lib.ElRedhefferDist_z(*args) else: DataExcept() else: TypeExcept() # Riffle # ------ lib.ElRiffle_s.argtypes = \ lib.ElRiffle_d.argtypes = \ lib.ElRiffle_c.argtypes = \ lib.ElRiffle_z.argtypes = \ lib.ElRiffleDist_s.argtypes = \ lib.ElRiffleDist_d.argtypes = \ lib.ElRiffleDist_c.argtypes = \ lib.ElRiffleDist_z.argtypes = \ [c_void_p,iType] def Riffle(P,n): args = [P.obj,n] if type(P) is Matrix: if P.tag == sTag: lib.ElRiffle_s(*args) elif P.tag == dTag: lib.ElRiffle_d(*args) elif P.tag == cTag: lib.ElRiffle_c(*args) elif P.tag == zTag: lib.ElRiffle_z(*args) else: DataExcept() elif type(P) is DistMatrix: if P.tag == sTag: lib.ElRiffleDist_s(*args) elif P.tag == dTag: lib.ElRiffleDist_d(*args) elif P.tag == cTag: lib.ElRiffleDist_c(*args) elif P.tag == zTag: lib.ElRiffleDist_z(*args) else: DataExcept() else: TypeExcept() lib.ElRiffleStationary_s.argtypes = \ lib.ElRiffleStationary_d.argtypes = \ lib.ElRiffleStationary_c.argtypes = \ lib.ElRiffleStationary_z.argtypes = \ lib.ElRiffleStationaryDist_s.argtypes = \ lib.ElRiffleStationaryDist_d.argtypes = \ 
lib.ElRiffleStationaryDist_c.argtypes = \ lib.ElRiffleStationaryDist_z.argtypes = \ [c_void_p,iType] def RiffleStationary(P,n): args = [P.obj,n] if type(P) is Matrix: if P.tag == sTag: lib.ElRiffleStationary_s(*args) elif P.tag == dTag: lib.ElRiffleStationary_d(*args) elif P.tag == cTag: lib.ElRiffleStationary_c(*args) elif P.tag == zTag: lib.ElRiffleStationary_z(*args) else: DataExcept() elif type(P) is DistMatrix: if P.tag == sTag: lib.ElRiffleStationaryDist_s(*args) elif P.tag == dTag: lib.ElRiffleStationaryDist_d(*args) elif P.tag == cTag: lib.ElRiffleStationaryDist_c(*args) elif P.tag == zTag: lib.ElRiffleStationaryDist_z(*args) else: DataExcept() else: TypeExcept() lib.ElRiffleDecay_s.argtypes = \ lib.ElRiffleDecay_d.argtypes = \ lib.ElRiffleDecay_c.argtypes = \ lib.ElRiffleDecay_z.argtypes = \ lib.ElRiffleDecayDist_s.argtypes = \ lib.ElRiffleDecayDist_d.argtypes = \ lib.ElRiffleDecayDist_c.argtypes = \ lib.ElRiffleDecayDist_z.argtypes = \ [c_void_p,iType] def RiffleDecay(P,n): args = [P.obj,n] if type(P) is Matrix: if P.tag == sTag: lib.ElRiffleDecay_s(*args) elif P.tag == dTag: lib.ElRiffleDecay_d(*args) elif P.tag == cTag: lib.ElRiffleDecay_c(*args) elif P.tag == zTag: lib.ElRiffleDecay_z(*args) else: DataExcept() elif type(P) is DistMatrix: if P.tag == sTag: lib.ElRiffleDecayDist_s(*args) elif P.tag == dTag: lib.ElRiffleDecayDist_d(*args) elif P.tag == cTag: lib.ElRiffleDecayDist_c(*args) elif P.tag == zTag: lib.ElRiffleDecayDist_z(*args) else: DataExcept() else: TypeExcept() # Ris # --- lib.ElRis_s.argtypes = \ lib.ElRis_d.argtypes = \ lib.ElRis_c.argtypes = \ lib.ElRis_z.argtypes = \ lib.ElRisDist_s.argtypes = \ lib.ElRisDist_d.argtypes = \ lib.ElRisDist_c.argtypes = \ lib.ElRisDist_z.argtypes = \ [c_void_p,iType] def Ris(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == sTag: lib.ElRis_s(*args) elif A.tag == dTag: lib.ElRis_d(*args) elif A.tag == cTag: lib.ElRis_c(*args) elif A.tag == zTag: lib.ElRis_z(*args) else: DataExcept() elif type(A) is 
DistMatrix: if A.tag == sTag: lib.ElRisDist_s(*args) elif A.tag == dTag: lib.ElRisDist_d(*args) elif A.tag == cTag: lib.ElRisDist_c(*args) elif A.tag == zTag: lib.ElRisDist_z(*args) else: DataExcept() else: TypeExcept() # Toeplitz # -------- lib.ElToeplitz_i.argtypes = [c_void_p,iType,iType,iType,POINTER(iType)] lib.ElToeplitzDist_i.argtypes = [c_void_p,iType,iType,iType,POINTER(iType)] lib.ElToeplitz_s.argtypes = [c_void_p,iType,iType,iType,POINTER(sType)] lib.ElToeplitzDist_s.argtypes = [c_void_p,iType,iType,iType,POINTER(sType)] lib.ElToeplitz_d.argtypes = [c_void_p,iType,iType,iType,POINTER(dType)] lib.ElToeplitzDist_d.argtypes = [c_void_p,iType,iType,iType,POINTER(dType)] lib.ElToeplitz_c.argtypes = [c_void_p,iType,iType,iType,POINTER(cType)] lib.ElToeplitzDist_c.argtypes = [c_void_p,iType,iType,iType,POINTER(cType)] lib.ElToeplitz_z.argtypes = [c_void_p,iType,iType,iType,POINTER(zType)] lib.ElToeplitzDist_z.argtypes = [c_void_p,iType,iType,iType,POINTER(zType)] def Toeplitz(A,m,n,a): aLen = len(a) aBuf = (TagToType(A.tag)*aLen)(*a) args = [A.obj,m,n,aLen,aBuf] if type(A) is Matrix: if A.tag == iTag: lib.ElToeplitz_i(*args) elif A.tag == sTag: lib.ElToeplitz_s(*args) elif A.tag == dTag: lib.ElToeplitz_d(*args) elif A.tag == cTag: lib.ElToeplitz_c(*args) elif A.tag == zTag: lib.ElToeplitz_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElToeplitzDist_i(*args) elif A.tag == sTag: lib.ElToeplitzDist_s(*args) elif A.tag == dTag: lib.ElToeplitzDist_d(*args) elif A.tag == cTag: lib.ElToeplitzDist_c(*args) elif A.tag == zTag: lib.ElToeplitzDist_z(*args) else: DataExcept() else: TypeExcept() # Trefethen-Embree # ---------------- lib.ElTrefethenEmbree_c.argtypes = \ lib.ElTrefethenEmbree_z.argtypes = \ lib.ElTrefethenEmbreeDist_c.argtypes = \ lib.ElTrefethenEmbreeDist_z.argtypes = \ [c_void_p,iType] def TrefethenEmbree(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == cTag: lib.ElTrefethenEmbree_c(*args) elif A.tag == zTag: 
lib.ElTrefethenEmbree_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElTrefethenEmbreeDist_c(*args) elif A.tag == zTag: lib.ElTrefethenEmbreeDist_z(*args) else: DataExcept() else: TypeExcept() # Triangle # -------- lib.ElTriangle_c.argtypes = \ lib.ElTriangle_z.argtypes = \ lib.ElTriangleDist_c.argtypes = \ lib.ElTriangleDist_z.argtypes = \ [c_void_p,iType] def Triangle(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == cTag: lib.ElTriangle_c(*args) elif A.tag == zTag: lib.ElTriangle_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElTriangleDist_c(*args) elif A.tag == zTag: lib.ElTriangleDist_z(*args) else: DataExcept() else: TypeExcept() # TriW # ---- lib.ElTriW_i.argtypes = [c_void_p,iType,iType,iType] lib.ElTriW_s.argtypes = [c_void_p,iType,sType,iType] lib.ElTriW_d.argtypes = [c_void_p,iType,dType,iType] lib.ElTriW_c.argtypes = [c_void_p,iType,cType,iType] lib.ElTriW_z.argtypes = [c_void_p,iType,zType,iType] lib.ElTriWDist_i.argtypes = [c_void_p,iType,iType,iType] lib.ElTriWDist_s.argtypes = [c_void_p,iType,sType,iType] lib.ElTriWDist_d.argtypes = [c_void_p,iType,dType,iType] lib.ElTriWDist_c.argtypes = [c_void_p,iType,cType,iType] lib.ElTriWDist_z.argtypes = [c_void_p,iType,zType,iType] def TriW(A,n,alpha,k): args = [A.obj,n,alpha,k] if type(A) is Matrix: if A.tag == iTag: lib.ElTriW_i(*args) elif A.tag == sTag: lib.ElTriW_s(*args) elif A.tag == dTag: lib.ElTriW_d(*args) elif A.tag == cTag: lib.ElTriW_c(*args) elif A.tag == zTag: lib.ElTriW_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElTriWDist_i(*args) elif A.tag == sTag: lib.ElTriWDist_s(*args) elif A.tag == dTag: lib.ElTriWDist_d(*args) elif A.tag == cTag: lib.ElTriWDist_c(*args) elif A.tag == zTag: lib.ElTriWDist_z(*args) else: DataExcept() else: TypeExcept() # Walsh # ----- lib.ElWalsh_i.argtypes = \ lib.ElWalsh_s.argtypes = \ lib.ElWalsh_d.argtypes = \ lib.ElWalsh_c.argtypes = \ lib.ElWalsh_z.argtypes = \ 
lib.ElWalshDist_i.argtypes = \ lib.ElWalshDist_s.argtypes = \ lib.ElWalshDist_d.argtypes = \ lib.ElWalshDist_c.argtypes = \ lib.ElWalshDist_z.argtypes = \ [c_void_p,iType,bType] def Walsh(A,k,binary=False): args = [A.obj,k,binary] if type(A) is Matrix: if A.tag == iTag: lib.ElWalsh_i(*args) elif A.tag == sTag: lib.ElWalsh_s(*args) elif A.tag == dTag: lib.ElWalsh_d(*args) elif A.tag == cTag: lib.ElWalsh_c(*args) elif A.tag == zTag: lib.ElWalsh_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElWalshDist_i(*args) elif A.tag == sTag: lib.ElWalshDist_s(*args) elif A.tag == dTag: lib.ElWalshDist_d(*args) elif A.tag == cTag: lib.ElWalshDist_c(*args) elif A.tag == zTag: lib.ElWalshDist_z(*args) else: DataExcept() else: TypeExcept() # Walsh-Identity # -------------- lib.ElWalshIdentity_i.argtypes = \ lib.ElWalshIdentity_s.argtypes = \ lib.ElWalshIdentity_d.argtypes = \ lib.ElWalshIdentity_c.argtypes = \ lib.ElWalshIdentity_z.argtypes = \ lib.ElWalshIdentityDist_i.argtypes = \ lib.ElWalshIdentityDist_s.argtypes = \ lib.ElWalshIdentityDist_d.argtypes = \ lib.ElWalshIdentityDist_c.argtypes = \ lib.ElWalshIdentityDist_z.argtypes = \ [c_void_p,iType,bType] def WalshIdentity(A,k,binary=False): args = [A.obj,k,binary] if type(A) is Matrix: if A.tag == iTag: lib.ElWalshIdentity_i(*args) elif A.tag == sTag: lib.ElWalshIdentity_s(*args) elif A.tag == dTag: lib.ElWalshIdentity_d(*args) elif A.tag == cTag: lib.ElWalshIdentity_c(*args) elif A.tag == zTag: lib.ElWalshIdentity_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElWalshIdentityDist_i(*args) elif A.tag == sTag: lib.ElWalshIdentityDist_s(*args) elif A.tag == dTag: lib.ElWalshIdentityDist_d(*args) elif A.tag == cTag: lib.ElWalshIdentityDist_c(*args) elif A.tag == zTag: lib.ElWalshIdentityDist_z(*args) else: DataExcept() else: TypeExcept() # Whale # ----- lib.ElWhale_c.argtypes = \ lib.ElWhale_z.argtypes = \ lib.ElWhaleDist_c.argtypes = \ lib.ElWhaleDist_z.argtypes = \ 
[c_void_p,iType] def Whale(A,n): args = [A.obj,n] if type(A) is Matrix: if A.tag == cTag: lib.ElWhale_c(*args) elif A.tag == zTag: lib.ElWhale_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElWhaleDist_c(*args) elif A.tag == zTag: lib.ElWhaleDist_z(*args) else: DataExcept() else: TypeExcept() # Wilkinson # --------- lib.ElWilkinson_i.argtypes = \ lib.ElWilkinson_s.argtypes = \ lib.ElWilkinson_d.argtypes = \ lib.ElWilkinson_c.argtypes = \ lib.ElWilkinson_z.argtypes = \ lib.ElWilkinsonDist_i.argtypes = \ lib.ElWilkinsonDist_s.argtypes = \ lib.ElWilkinsonDist_d.argtypes = \ lib.ElWilkinsonDist_c.argtypes = \ lib.ElWilkinsonDist_z.argtypes = \ [c_void_p,iType] def Wilkinson(A,k): args = [A.obj,k] if type(A) is Matrix: if A.tag == iTag: lib.ElWilkinson_i(*args) elif A.tag == sTag: lib.ElWilkinson_s(*args) elif A.tag == dTag: lib.ElWilkinson_d(*args) elif A.tag == cTag: lib.ElWilkinson_c(*args) elif A.tag == zTag: lib.ElWilkinson_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElWilkinsonDist_i(*args) elif A.tag == sTag: lib.ElWilkinsonDist_s(*args) elif A.tag == dTag: lib.ElWilkinsonDist_d(*args) elif A.tag == cTag: lib.ElWilkinsonDist_c(*args) elif A.tag == zTag: lib.ElWilkinsonDist_z(*args) else: DataExcept() else: TypeExcept() # Zeros # ----- lib.ElZeros_i.argtypes = \ lib.ElZeros_s.argtypes = \ lib.ElZeros_d.argtypes = \ lib.ElZeros_c.argtypes = \ lib.ElZeros_z.argtypes = \ lib.ElZerosDist_i.argtypes = \ lib.ElZerosDist_s.argtypes = \ lib.ElZerosDist_d.argtypes = \ lib.ElZerosDist_c.argtypes = \ lib.ElZerosDist_z.argtypes = \ lib.ElZerosSparse_i.argtypes = \ lib.ElZerosSparse_s.argtypes = \ lib.ElZerosSparse_d.argtypes = \ lib.ElZerosSparse_c.argtypes = \ lib.ElZerosSparse_z.argtypes = \ lib.ElZerosDistSparse_i.argtypes = \ lib.ElZerosDistSparse_s.argtypes = \ lib.ElZerosDistSparse_d.argtypes = \ lib.ElZerosDistSparse_c.argtypes = \ lib.ElZerosDistSparse_z.argtypes = \ lib.ElZerosDistMultiVec_i.argtypes = 
\ lib.ElZerosDistMultiVec_s.argtypes = \ lib.ElZerosDistMultiVec_d.argtypes = \ lib.ElZerosDistMultiVec_c.argtypes = \ lib.ElZerosDistMultiVec_z.argtypes = \ [c_void_p,iType,iType] def Zeros(A,m,n): args = [A.obj,m,n] if type(A) is Matrix: if A.tag == iTag: lib.ElZeros_i(*args) elif A.tag == sTag: lib.ElZeros_s(*args) elif A.tag == dTag: lib.ElZeros_d(*args) elif A.tag == cTag: lib.ElZeros_c(*args) elif A.tag == zTag: lib.ElZeros_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElZerosDist_i(*args) elif A.tag == sTag: lib.ElZerosDist_s(*args) elif A.tag == dTag: lib.ElZerosDist_d(*args) elif A.tag == cTag: lib.ElZerosDist_c(*args) elif A.tag == zTag: lib.ElZerosDist_z(*args) else: DataExcept() elif type(A) is SparseMatrix: if A.tag == iTag: lib.ElZerosSparse_i(*args) elif A.tag == sTag: lib.ElZerosSparse_s(*args) elif A.tag == dTag: lib.ElZerosSparse_d(*args) elif A.tag == cTag: lib.ElZerosSparse_c(*args) elif A.tag == zTag: lib.ElZerosSparse_z(*args) else: DataExcept() elif type(A) is DistSparseMatrix: if A.tag == iTag: lib.ElZerosDistSparse_i(*args) elif A.tag == sTag: lib.ElZerosDistSparse_s(*args) elif A.tag == dTag: lib.ElZerosDistSparse_d(*args) elif A.tag == cTag: lib.ElZerosDistSparse_c(*args) elif A.tag == zTag: lib.ElZerosDistSparse_z(*args) else: DataExcept() elif type(A) is DistMultiVec: if A.tag == iTag: lib.ElZerosDistMultiVec_i(*args) elif A.tag == sTag: lib.ElZerosDistMultiVec_s(*args) elif A.tag == dTag: lib.ElZerosDistMultiVec_d(*args) elif A.tag == cTag: lib.ElZerosDistMultiVec_c(*args) elif A.tag == zTag: lib.ElZerosDistMultiVec_z(*args) else: DataExcept() else: TypeExcept() # Random # ====== # Bernoulli # --------- lib.ElBernoulli_i.argtypes = \ lib.ElBernoulli_s.argtypes = \ lib.ElBernoulli_d.argtypes = \ lib.ElBernoulli_c.argtypes = \ lib.ElBernoulli_z.argtypes = \ lib.ElBernoulliDist_i.argtypes = \ lib.ElBernoulliDist_s.argtypes = \ lib.ElBernoulliDist_d.argtypes = \ lib.ElBernoulliDist_c.argtypes = \ 
lib.ElBernoulliDist_z.argtypes = \ [c_void_p,iType,iType] def Bernoulli(A,m,n): args = [A.obj,m,n] if type(A) is Matrix: if A.tag == iTag: lib.ElBernoulli_i(*args) elif A.tag == sTag: lib.ElBernoulli_s(*args) elif A.tag == dTag: lib.ElBernoulli_d(*args) elif A.tag == cTag: lib.ElBernoulli_c(*args) elif A.tag == zTag: lib.ElBernoulli_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElBernoulliDist_i(*args) elif A.tag == sTag: lib.ElBernoulliDist_s(*args) elif A.tag == dTag: lib.ElBernoulliDist_d(*args) elif A.tag == cTag: lib.ElBernoulliDist_c(*args) elif A.tag == zTag: lib.ElBernoulliDist_z(*args) else: DataExcept() else: TypeExcept() # Gaussian # -------- lib.ElGaussian_s.argtypes = [c_void_p,iType,iType,sType,sType] lib.ElGaussian_d.argtypes = [c_void_p,iType,iType,dType,dType] lib.ElGaussian_c.argtypes = [c_void_p,iType,iType,cType,sType] lib.ElGaussian_z.argtypes = [c_void_p,iType,iType,zType,dType] lib.ElGaussianDist_s.argtypes = [c_void_p,iType,iType,sType,sType] lib.ElGaussianDist_d.argtypes = [c_void_p,iType,iType,dType,dType] lib.ElGaussianDist_c.argtypes = [c_void_p,iType,iType,cType,sType] lib.ElGaussianDist_z.argtypes = [c_void_p,iType,iType,zType,dType] lib.ElGaussianDistMultiVec_s.argtypes = [c_void_p,iType,iType,sType,sType] lib.ElGaussianDistMultiVec_d.argtypes = [c_void_p,iType,iType,dType,dType] lib.ElGaussianDistMultiVec_c.argtypes = [c_void_p,iType,iType,cType,sType] lib.ElGaussianDistMultiVec_z.argtypes = [c_void_p,iType,iType,zType,dType] def Gaussian(A,m,n,meanPre=0,stddev=1): mean = TagToType(A.tag)(meanPre) args = [A.obj,m,n,mean,stddev] if type(A) is Matrix: if A.tag == sTag: lib.ElGaussian_s(*args) elif A.tag == dTag: lib.ElGaussian_d(*args) elif A.tag == cTag: lib.ElGaussian_c(*args) elif A.tag == zTag: lib.ElGaussian_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == sTag: lib.ElGaussianDist_s(*args) elif A.tag == dTag: lib.ElGaussianDist_d(*args) elif A.tag == cTag: 
lib.ElGaussianDist_c(*args) elif A.tag == zTag: lib.ElGaussianDist_z(*args) else: DataExcept() elif type(A) is DistMultiVec: if A.tag == sTag: lib.ElGaussianDistMultiVec_s(*args) elif A.tag == dTag: lib.ElGaussianDistMultiVec_d(*args) elif A.tag == cTag: lib.ElGaussianDistMultiVec_c(*args) elif A.tag == zTag: lib.ElGaussianDistMultiVec_z(*args) else: DataExcept() else: TypeExcept() # Normal uniform spectrum # ----------------------- lib.ElNormalUniformSpectrum_c.argtypes = [c_void_p,iType,cType,sType] lib.ElNormalUniformSpectrum_z.argtypes = [c_void_p,iType,zType,dType] lib.ElNormalUniformSpectrumDist_c.argtypes = [c_void_p,iType,cType,sType] lib.ElNormalUniformSpectrumDist_z.argtypes = [c_void_p,iType,zType,dType] def NormalUniformSpectrum(A,n,centerPre=0,radius=1): center = TagToType(A.tag)(centerPre) args = [A.obj,n,center,radius] if type(A) is Matrix: if A.tag == cTag: lib.ElNormalUniformSpectrum_c(*args) elif A.tag == zTag: lib.ElNormalUniformSpectrum_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == cTag: lib.ElNormalUniformSpectrumDist_c(*args) elif A.tag == zTag: lib.ElNormalUniformSpectrumDist_z(*args) else: DataExcept() else: TypeExcept() # Three-valued # ------------ lib.ElThreeValued_i.argtypes = \ lib.ElThreeValued_s.argtypes = \ lib.ElThreeValued_d.argtypes = \ lib.ElThreeValued_c.argtypes = \ lib.ElThreeValued_z.argtypes = \ lib.ElThreeValuedDist_i.argtypes = \ lib.ElThreeValuedDist_s.argtypes = \ lib.ElThreeValuedDist_d.argtypes = \ lib.ElThreeValuedDist_c.argtypes = \ lib.ElThreeValuedDist_z.argtypes = \ [c_void_p,iType,iType,dType] def ThreeValued(A,m,n,p=2./3.): args = [A.obj,m,n,p] if type(A) is Matrix: if A.tag == iTag: lib.ElThreeValued_i(*args) elif A.tag == sTag: lib.ElThreeValued_s(*args) elif A.tag == dTag: lib.ElThreeValued_d(*args) elif A.tag == cTag: lib.ElThreeValued_c(*args) elif A.tag == zTag: lib.ElThreeValued_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElThreeValuedDist_i(*args) 
elif A.tag == sTag: lib.ElThreeValuedDist_s(*args) elif A.tag == dTag: lib.ElThreeValuedDist_d(*args) elif A.tag == cTag: lib.ElThreeValuedDist_c(*args) elif A.tag == zTag: lib.ElThreeValuedDist_z(*args) else: DataExcept() else: TypeExcept() # Uniform # ------- lib.ElUniform_i.argtypes = \ lib.ElUniformDist_i.argtypes = \ lib.ElUniformDistMultiVec_i.argtypes = \ [c_void_p,iType,iType,iType,iType] lib.ElUniform_s.argtypes = \ lib.ElUniformDist_s.argtypes = \ lib.ElUniformDistMultiVec_s.argtypes = \ [c_void_p,iType,iType,sType,sType] lib.ElUniform_d.argtypes = \ lib.ElUniformDist_d.argtypes = \ lib.ElUniformDistMultiVec_d.argtypes = \ [c_void_p,iType,iType,dType,dType] lib.ElUniform_c.argtypes = \ lib.ElUniformDist_c.argtypes = \ lib.ElUniformDistMultiVec_c.argtypes = \ [c_void_p,iType,iType,cType,sType] lib.ElUniform_z.argtypes = \ lib.ElUniformDist_z.argtypes = \ lib.ElUniformDistMultiVec_z.argtypes = \ [c_void_p,iType,iType,zType,dType] def Uniform(A,m,n,centerPre=0,radius=1): center = TagToType(A.tag)(centerPre) args = [A.obj,m,n,center,radius] if type(A) is Matrix: if A.tag == iTag: lib.ElUniform_i(*args) elif A.tag == sTag: lib.ElUniform_s(*args) elif A.tag == dTag: lib.ElUniform_d(*args) elif A.tag == cTag: lib.ElUniform_c(*args) elif A.tag == zTag: lib.ElUniform_z(*args) else: DataExcept() elif type(A) is DistMatrix: if A.tag == iTag: lib.ElUniformDist_i(*args) elif A.tag == sTag: lib.ElUniformDist_s(*args) elif A.tag == dTag: lib.ElUniformDist_d(*args) elif A.tag == cTag: lib.ElUniformDist_c(*args) elif A.tag == zTag: lib.ElUniformDist_z(*args) else: DataExcept() elif type(A) is DistMultiVec: if A.tag == iTag: lib.ElUniformDistMultiVec_i(*args) elif A.tag == sTag: lib.ElUniformDistMultiVec_s(*args) elif A.tag == dTag: lib.ElUniformDistMultiVec_d(*args) elif A.tag == cTag: lib.ElUniformDistMultiVec_c(*args) elif A.tag == zTag: lib.ElUniformDistMultiVec_z(*args) else: DataExcept() else: TypeExcept() # Uniform Helmholtz Green's # ------------------------- 
# ctypes signatures for the Helmholtz Green's samplers: (handle, n, lambda),
# where the trailing scalar is the real base type of the complex entries
# (sType for tag 'c', dType for tag 'z').
lib.ElUniformHelmholtzGreens_c.argtypes = \
lib.ElUniformHelmholtzGreensDist_c.argtypes = \
  [c_void_p,iType,sType]
lib.ElUniformHelmholtzGreens_z.argtypes = \
lib.ElUniformHelmholtzGreensDist_z.argtypes = \
  [c_void_p,iType,dType]

def UniformHelmholtzGreens(A,n,lamb):
  """Fill the complex matrix A in place with an n x n 'uniform Helmholtz
  Green's' matrix via the Elemental C routines; lamb is passed straight
  through to the library (presumably the wavelength parameter -- exact
  semantics are defined by the Elemental C API, not here).

  Only complex tags ('c', 'z') are accepted: other tags fall through to
  DataExcept(), and matrix classes other than Matrix/DistMatrix fall
  through to TypeExcept().
  """
  args = [A.obj,n,lamb]
  if type(A) is Matrix:
    # Dispatch on the element tag to the single- or double-precision
    # complex routine.
    if   A.tag == cTag: lib.ElUniformHelmholtzGreens_c(*args)
    elif A.tag == zTag: lib.ElUniformHelmholtzGreens_z(*args)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if   A.tag == cTag: lib.ElUniformHelmholtzGreensDist_c(*args)
    elif A.tag == zTag: lib.ElUniformHelmholtzGreensDist_z(*args)
    else: DataExcept()
  else: TypeExcept()

# Wigner
# ------
# ctypes signatures: (handle, n, mean, stddev), with mean of the matrix
# scalar type and stddev of the corresponding real base type.
lib.ElWigner_s.argtypes = \
lib.ElWignerDist_s.argtypes = \
  [c_void_p,iType,sType,sType]
lib.ElWigner_d.argtypes = \
lib.ElWignerDist_d.argtypes = \
  [c_void_p,iType,dType,dType]
lib.ElWigner_c.argtypes = \
lib.ElWignerDist_c.argtypes = \
  [c_void_p,iType,cType,sType]
lib.ElWigner_z.argtypes = \
lib.ElWignerDist_z.argtypes = \
  [c_void_p,iType,zType,dType]

def Wigner(A,n,meanPre=0,stddev=1):
  """Fill A in place with an n x n Wigner (random) matrix whose entries
  use the given mean and standard deviation.  meanPre is first converted
  to the scalar type matching A.tag via TagToType.

  Real and complex tags are supported on both Matrix and DistMatrix;
  unknown tags raise via DataExcept(), other classes via TypeExcept().
  """
  mean = TagToType(A.tag)(meanPre)
  args = [A.obj,n,mean,stddev]
  if type(A) is Matrix:
    if   A.tag == sTag: lib.ElWigner_s(*args)
    elif A.tag == dTag: lib.ElWigner_d(*args)
    elif A.tag == cTag: lib.ElWigner_c(*args)
    elif A.tag == zTag: lib.ElWigner_z(*args)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if   A.tag == sTag: lib.ElWignerDist_s(*args)
    elif A.tag == dTag: lib.ElWignerDist_d(*args)
    elif A.tag == cTag: lib.ElWignerDist_c(*args)
    elif A.tag == zTag: lib.ElWignerDist_z(*args)
    else: DataExcept()
  else: TypeExcept()
justusc/Elemental
python/matrices.py
Python
bsd-3-clause
84,298
[ "Gaussian" ]
0dd3f1796a762fc86701d18229590969837d40634e315efee2d3cb40e676f4fa
import collections, logging, itertools import bashlex.parser import bashlex.ast from explainshell import errors, util, helpconstants class matchgroup(object): '''a class to group matchresults together we group all shell results in one group and create a new group for every command''' def __init__(self, name): self.name = name self.results = [] def __repr__(self): return '<matchgroup %r with %d results>' % (self.name, len(self.results)) class matchresult(collections.namedtuple('matchresult', 'start end text match')): @property def unknown(self): return self.text is None matchwordexpansion = collections.namedtuple('matchwordexpansion', 'start end kind') logger = logging.getLogger(__name__) class matcher(bashlex.ast.nodevisitor): '''parse a command line and return a list of matchresults describing each token. ''' def __init__(self, s, store): self.s = s.encode('latin1', 'replace') self.store = store self._prevoption = self._currentoption = None self.groups = [matchgroup('shell')] # a list of matchwordexpansions where expansions happened during word # expansion self.expansions = [] # a stack to manage nested command groups: whenever a new simple # command is started, we push a tuple with: # - the node that started this group. this is used to find it when # a command ends (see visitnodeend) # - its matchgroup. new matchresults will be added to it. # - a word used to end the top-most command. this is used when a flag # starts a new command, e.g. find -exec. self.groupstack = [(None, self.groups[-1], None)] # keep a stack of the currently visited compound command (if/for..) # to provide context when matching reserved words, since for example # the keyword 'done' can appear in a for, while.. 
self.compoundstack = [] # a set of functions defined in the current input, we will try to match # commands against them so if one refers to defined function, it won't # show up as unknown or be taken from the db self.functions = set() def _generatecommandgroupname(self): existing = len([g for g in self.groups if g.name.startswith('command')]) return 'command%d' % existing @property def matches(self): '''return the list of results from the most recently created group''' return self.groupstack[-1][1].results @property def allmatches(self): return list(itertools.chain.from_iterable(g.results for g in self.groups)) @property def manpage(self): group = self.groupstack[-1][1] # we do not have a manpage if the top of the stack is the shell group. # this can happen if the first argument is a command substitution # and we're not treating it as a "man page not found" if group.name != 'shell': return group.manpage def find_option(self, opt): self._currentoption = self.manpage.find_option(opt) logger.debug('looking up option %r, got %r', opt, self._currentoption) return self._currentoption def findmanpages(self, prog): prog = prog.decode('latin1') logger.info('looking up %r in store', prog) manpages = self.store.findmanpage(prog) logger.info('found %r in store, got: %r, using %r', prog, manpages, manpages[0]) return manpages def unknown(self, token, start, end): logger.debug('nothing to do with token %r', token) return matchresult(start, end, None, None) def visitreservedword(self, node, word): # first try the compound reserved words helptext = None if self.compoundstack: currentcompound = self.compoundstack[-1] helptext = helpconstants.COMPOUNDRESERVEDWORDS.get(currentcompound, {}).get(word) # try these if we don't have anything specific if not helptext: helptext = helpconstants.RESERVEDWORDS[word] self.groups[0].results.append(matchresult(node.pos[0], node.pos[1], helptext, None)) def visitoperator(self, node, op): helptext = None if self.compoundstack: currentcompound = 
self.compoundstack[-1] helptext = helpconstants.COMPOUNDRESERVEDWORDS.get(currentcompound, {}).get(op) if not helptext: helptext = helpconstants.OPERATORS[op] self.groups[0].results.append(matchresult(node.pos[0], node.pos[1], helptext, None)) def visitpipe(self, node, pipe): self.groups[0].results.append( matchresult(node.pos[0], node.pos[1], helpconstants.PIPELINES, None)) def visitredirect(self, node, input, type, output, heredoc): helptext = [helpconstants.REDIRECTION] if type == '>&' and isinstance(output, int): type = type[:-1] if type in helpconstants.REDIRECTION_KIND: helptext.append(helpconstants.REDIRECTION_KIND[type]) self.groups[0].results.append( matchresult(node.pos[0], node.pos[1], '\n\n'.join(helptext), None)) # the output might contain a wordnode, visiting it will confuse the # matcher who'll think it's an argument, instead visit the expansions # directly, if we have any if isinstance(output, bashlex.ast.node): for part in output.parts: self.visit(part) return False def visitcommand(self, node, parts): assert parts # look for the first WordNode, which might not be at parts[0] idxwordnode = bashlex.ast.findfirstkind(parts, 'word') if idxwordnode == -1: logger.info('no words found in command (probably contains only redirects)') return wordnode = parts[idxwordnode] # check if this refers to a previously defined function if wordnode.word in self.functions: logger.info('word %r is a function, not trying to match it or its ' 'arguments', wordnode) # first, add a matchresult for the function call mr = matchresult(wordnode.pos[0], wordnode.pos[1], helpconstants._functioncall % wordnode.word, None) self.matches.append(mr) # this is a bit nasty: if we were to visit the command like we # normally do it would try to match it against a manpage. but # we don't have one here, we just want to take all the words and # consider them part of the function call for part in parts: # maybe it's a redirect... 
if part.kind != 'word': self.visit(part) else: # this is an argument to the function if part is not wordnode: mr = matchresult(part.pos[0], part.pos[1], helpconstants._functionarg % wordnode.word, None) self.matches.append(mr) # visit any expansions in there for ppart in part.parts: self.visit(ppart) # we're done with this commandnode, don't visit its children return False self.startcommand(node, parts, None) def visitif(self, *args): self.compoundstack.append('if') def visitfor(self, node, parts): self.compoundstack.append('for') for part in parts: # don't visit words since they're not part of the current command, # instead consider them part of the for construct if part.kind == 'word': mr = matchresult(part.pos[0], part.pos[1], helpconstants._for, None) self.groups[0].results.append(mr) # but we do want to visit expanions for ppart in part.parts: self.visit(ppart) else: self.visit(part) return False def visitwhile(self, *args): self.compoundstack.append('while') def visituntil(self, *args): self.compoundstack.append('until') def visitnodeend(self, node): if node.kind == 'command': # it's possible for visitcommand/end to be called without a command # group being pushed if it contains only redirect nodes if len(self.groupstack) > 1: logger.info('visitnodeend %r, groups %d', node, len(self.groupstack)) while self.groupstack[-1][0] is not node: logger.info('popping groups that are a result of nested commands') self.endcommand() self.endcommand() elif node.kind in ('if', 'for', 'while', 'until'): kind = self.compoundstack.pop() assert kind == node.kind def startcommand(self, commandnode, parts, endword, addgroup=True): logger.info('startcommand commandnode=%r parts=%r, endword=%r, addgroup=%s', commandnode, parts, endword, addgroup) idxwordnode = bashlex.ast.findfirstkind(parts, 'word') assert idxwordnode != -1 wordnode = parts[idxwordnode] if wordnode.parts: logger.info('node %r has parts (it was expanded), no point in looking' ' up a manpage for it', wordnode) if 
addgroup: mg = matchgroup(self._generatecommandgroupname()) mg.manpage = None mg.suggestions = None self.groups.append(mg) self.groupstack.append((commandnode, mg, endword)) return False startpos, endpos = wordnode.pos try: mps = self.findmanpages(wordnode.word) # we consume this node here, pop it from parts so we # don't visit it again as an argument parts.pop(idxwordnode) except errors.ProgramDoesNotExist, e: if addgroup: # add a group for this command, we'll mark it as unknown # when visitword is called logger.info('no manpage found for %r, adding a group for it', wordnode.word) mg = matchgroup(self._generatecommandgroupname()) mg.error = e mg.manpage = None mg.suggestions = None self.groups.append(mg) self.groupstack.append((commandnode, mg, endword)) return False manpage = mps[0] idxnextwordnode = bashlex.ast.findfirstkind(parts, 'word') # check the next word for a possible multicommand if: # - the matched manpage says so # - we have another word node # - the word node has no expansions in it if manpage.multicommand and idxnextwordnode != -1 and not parts[idxnextwordnode].parts: nextwordnode = parts[idxnextwordnode] try: multi = '%s %s' % (wordnode.word, nextwordnode.word) logger.info('%r is a multicommand, trying to get another token and look up %r', manpage, multi) mps = self.findmanpages(multi) manpage = mps[0] # we consume this node here, pop it from parts so we # don't visit it again as an argument parts.pop(idxnextwordnode) endpos = nextwordnode.pos[1] except errors.ProgramDoesNotExist: logger.info('no manpage %r for multicommand %r', multi, manpage) # create a new matchgroup for the current command mg = matchgroup(self._generatecommandgroupname()) mg.manpage = manpage mg.suggestions = mps[1:] self.groups.append(mg) self.groupstack.append((commandnode, mg, endword)) self.matches.append(matchresult(startpos, endpos, manpage.synopsis or helpconstants.NOSYNOPSIS, None)) return True def endcommand(self): '''end the most recently created command group by 
popping it from the group stack. groups are created by visitcommand or a nested command''' assert len(self.groupstack) >= 2, 'groupstack must contain shell and command groups' g = self.groupstack.pop() logger.info('ending group %s', g) def visitcommandsubstitution(self, node, command): kind = self.s[node.pos[0]] substart = 2 if kind == '$' else 1 # start the expansion after the $( or ` self.expansions.append(matchwordexpansion(node.pos[0] + substart, node.pos[1] - 1, 'substitution')) # do not try to match the child nodes return False def visitprocesssubstitution(self, node, command): # don't include opening <( and closing ) self.expansions.append(matchwordexpansion(node.pos[0] + 2, node.pos[1] - 1, 'substitution')) # do not try to match the child nodes return False def visitassignment(self, node, word): helptext = helpconstants.ASSIGNMENT self.groups[0].results.append(matchresult(node.pos[0], node.pos[1], helptext, None)) def visitword(self, node, word): def attemptfuzzy(chars): m = [] if chars[0] == '-': tokens = [chars[0:2]] + list(chars[2:]) considerarg = True else: tokens = list(chars) considerarg = False pos = node.pos[0] prevoption = None for i, t in enumerate(tokens): op = t if t[0] == '-' else '-' + t option = self.find_option(op) if option: if considerarg and not m and option.expectsarg: logger.info('option %r expected an arg, taking the rest too', option) # reset the current option if we already took an argument, # this prevents the next word node to also consider itself # as an argument self._currentoption = None return [matchresult(pos, pos+len(chars), option.text, None)] mr = matchresult(pos, pos+len(t), option.text, None) m.append(mr) # if the previous option expected an argument and we couldn't # match the current token, take the rest as its argument, this # covers a series of short options where the last one has an argument # with no space between it, such as 'xargs -r0n1' elif considerarg and prevoption and prevoption.expectsarg: pmr = m[-1] mr = 
matchresult(pmr.start, pmr.end+(len(tokens)-i), pmr.text, None) m[-1] = mr # reset the current option if we already took an argument, # this prevents the next word node to also consider itself # as an argument self._currentoption = None break else: m.append(self.unknown(t, pos, pos+len(t))) pos += len(t) prevoption = option return m def _visitword(node, word): if not self.manpage: logger.info('inside an unknown command, giving up on %r', word) self.matches.append(self.unknown(word, node.pos[0], node.pos[1])) return logger.info('trying to match token: %r', word) self._prevoption = self._currentoption if word.startswith('--'): word = word.split('=', 1)[0] option = self.find_option(word) if option: logger.info('found an exact match for %r: %r', word, option) mr = matchresult(node.pos[0], node.pos[1], option.text, None) self.matches.append(mr) # check if we splitted the word just above, if we did then reset # the current option so the next word doesn't consider itself # an argument if word != node.word: self._currentoption = None else: word = node.word # check if we're inside a nested command and this word marks the end if isinstance(self.groupstack[-1][-1], list) and word in self.groupstack[-1][-1]: logger.info('token %r ends current nested command', word) self.endcommand() mr = matchresult(node.pos[0], node.pos[1], self.matches[-1].text, None) self.matches.append(mr) elif word != '-' and word.startswith('-') and not word.startswith('--'): logger.debug('looks like a short option') if len(word) > 2: logger.info("trying to split it up") self.matches.extend(attemptfuzzy(word)) else: self.matches.append(self.unknown(word, node.pos[0], node.pos[1])) elif self._prevoption and self._prevoption.expectsarg: logger.info("previous option possibly expected an arg, and we can't" " find an option to match the current token, assuming it's an arg") ea = self._prevoption.expectsarg possibleargs = ea if isinstance(ea, list) else [] take = True if possibleargs and word not in 
possibleargs: take = False logger.info('token %r not in list of possible args %r for %r', word, possibleargs, self._prevoption) if take: if self._prevoption.nestedcommand: logger.info('option %r can nest commands', self._prevoption) if self.startcommand(None, [node], self._prevoption.nestedcommand, addgroup=False): self._currentoption = None return pmr = self.matches[-1] mr = matchresult(pmr.start, node.pos[1], pmr.text, None) self.matches[-1] = mr else: self.matches.append(self.unknown(word, node.pos[0], node.pos[1])) else: if self.manpage.partialmatch: logger.info('attemping to do a partial match') m = attemptfuzzy(word) if not any(mm.unknown for mm in m): logger.info('found a match for everything, taking it') self.matches.extend(m) return if self.manpage.arguments: if self.manpage.nestedcommand: logger.info('manpage %r can nest commands', self.manpage) if self.startcommand(None, [node], self.manpage.nestedcommand, addgroup=False): self._currentoption = None return d = self.manpage.arguments k = list(d.keys())[0] logger.info('got arguments, using %r', k) text = d[k] mr = matchresult(node.pos[0], node.pos[1], text, None) self.matches.append(mr) return # if all of that failed, we can't explain it so mark it unknown self.matches.append(self.unknown(word, node.pos[0], node.pos[1])) _visitword(node, word) def visitfunction(self, node, name, body, parts): self.functions.add(name.word) def _iscompoundopenclosecurly(compound): first, last = compound.list[0], compound.list[-1] if (first.kind == 'reservedword' and last.kind == 'reservedword' and first.word == '{' and last.word == '}'): return True # if the compound command we have there is { }, let's include the # {} as part of the function declaration. 
normally it would be # treated as a group command, but that seems less informative in this # context if _iscompoundopenclosecurly(body): # create a matchresult until after the first { mr = matchresult(node.pos[0], body.list[0].pos[1], helpconstants._function, None) self.groups[0].results.append(mr) # create a matchresult for the closing } mr = matchresult(body.list[-1].pos[0], body.list[-1].pos[1], helpconstants._function, None) self.groups[0].results.append(mr) # visit anything in between the { } for part in body.list[1:-1]: self.visit(part) else: beforebody = bashlex.ast.findfirstkind(parts, 'compound') - 1 assert beforebody > 0 beforebody = parts[beforebody] # create a matchresult ending at the node before body mr = matchresult(node.pos[0], beforebody.pos[1], helpconstants._function, None) self.groups[0].results.append(mr) self.visit(body) return False def visittilde(self, node, value): self.expansions.append(matchwordexpansion(node.pos[0], node.pos[1], 'tilde')) def visitparameter(self, node, value): try: int(value) kind = 'digits' except ValueError: kind = helpconstants.parameters.get(value, 'param') self.expansions.append(matchwordexpansion(node.pos[0], node.pos[1], 'parameter-%s' % kind)) def match(self): logger.info('matching string %r', self.s) # limit recursive parsing to a depth of 1 self.ast = bashlex.parser.parsesingle(self.s, expansionlimit=1, strictmode=False) if self.ast: self.visit(self.ast) assert len(self.groupstack) == 1, 'groupstack should contain only shell group after matching' # if we only have one command in there and no shell results/expansions, # reraise the original exception if (len(self.groups) == 2 and not self.groups[0].results and self.groups[1].manpage is None and not self.expansions): raise self.groups[1].error else: logger.warn('no AST generated for %r', self.s) def debugmatch(): s = '\n'.join(['%d) %r = %r' % (i, self.s[m.start:m.end], m.text) for i, m in enumerate(self.allmatches)]) return s self._markunparsedunknown() # fix 
each matchgroup seperately for group in self.groups: if group.results: if getattr(group, 'manpage', None): # ensure that the program part isn't unknown (i.e. it has # something as its synopsis) assert not group.results[0].unknown group.results = self._mergeadjacent(group.results) # add matchresult.match to existing matches for i, m in enumerate(group.results): assert m.end <= len(self.s), '%d %d' % (m.end, len(self.s)) portion = self.s[m.start:m.end].decode('latin1') group.results[i] = matchresult(m.start, m.end, m.text, portion) logger.debug('%r matches:\n%s', self.s, debugmatch()) # not strictly needed, but doesn't hurt self.expansions.sort() return self.groups def _markunparsedunknown(self): '''the parser may leave a remainder at the end of the string if it doesn't match any of the rules, mark them as unknowns''' parsed = [False]*len(self.s) # go over all existing matches to see if we've covered the # current position for start, end, _, _ in self.allmatches: for i in range(start, end): parsed[i] = True for i in range(len(parsed)): c = self.s[i] # whitespace is always 'unparsed' if c.isspace(): parsed[i] = True # the parser ignores comments but we can use a trick to see if this # starts a comment and is beyond the ending index of the parsed # portion of the inpnut if (not self.ast or i > self.ast.pos[1]) and c == '#': comment = matchresult(i, len(parsed), helpconstants.COMMENT, None) self.groups[0].results.append(comment) break if not parsed[i]: # add unparsed results to the 'shell' group self.groups[0].results.append(self.unknown(c, i, i+1)) # there are no overlaps, so sorting by the start is enough self.groups[0].results.sort(key=lambda mr: mr.start) def _resultindex(self): '''return a mapping of matchresults to their index among all matches, sorted by the start position of the matchresult''' d = {} i = 0 for result in sorted(self.allmatches, key=lambda mr: mr.start): d[result] = i i += 1 return d def _mergeadjacent(self, matches): merged = [] resultindex = 
self._resultindex() sametext = itertools.groupby(matches, lambda m: m.text) for text, ll in sametext: for l in util.groupcontinuous(ll, key=lambda m: resultindex[m]): if len(l) == 1: merged.append(l[0]) else: start = l[0].start end = l[-1].end endindex = resultindex[l[-1]] for mr in l: del resultindex[mr] merged.append(matchresult(start, end, text, None)) resultindex[merged[-1]] = endindex return merged
Mtax/explainshell
explainshell/matcher.py
Python
gpl-3.0
26,896
[ "VisIt" ]
8cb67a9ae1441f1a4d7f0f6802197b3a50dfea3ba5967894af804bbe17388c22
#!/usr/bin/env python
"""
Archive a transformation
"""
from DIRAC.Core.Base.Script import Script


@Script()
def main():
    # Registering the argument also feeds the auto-generated help text.
    Script.registerArgument(["transID: transformation ID"])
    _, positional = Script.parseCommandLine()

    # Validate every ID up front: a non-numeric argument aborts the run
    # before the agent is ever constructed.
    transformation_ids = [int(raw) for raw in positional]

    # NOTE(review): this import was deferred in the original, presumably
    # because DIRAC must be initialised by parseCommandLine() first --
    # kept function-local for that reason.
    from DIRAC.TransformationSystem.Agent.TransformationCleaningAgent import TransformationCleaningAgent

    agent = TransformationCleaningAgent(
        "Transformation/TransformationCleaningAgent",
        "Transformation/TransformationCleaningAgent",
        "dirac-transformation-archive",
    )
    agent.initialize()

    for trans_id in transformation_ids:
        agent.archiveTransformation(trans_id)


if __name__ == "__main__":
    main()
DIRACGrid/DIRAC
src/DIRAC/TransformationSystem/scripts/dirac_transformation_archive.py
Python
gpl-3.0
790
[ "DIRAC" ]
1506ed5703e693978f95bbba2ae461cc7836205a30b2dda14fa258cf13202e41
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 17:48:33 2015

@author: timmonen

Premap trimmed reads against the NL4-3 reference with stampy and write a
small JSON summary with the resulting read-pair count.
"""
import sys
import os
import subprocess as sp
import argparse

import pysam

from pipeline.sample import Sample


# Functions
def pre_map(sample, VERBOSE=0, **kwargs):
    """Map the sample's trimmed reads with stampy and write a summary.

    Parameters:
        sample  -- a pipeline Sample; provides input/output filenames.
        VERBOSE -- verbosity level (currently unused here, kept for API
                   symmetry with the rest of the pipeline).
    """
    fn_in = sample.get_read_filenames(gzip=True, trimmed=True)['data']
    fn_outd = sample.get_pre_map_filename()
    fn_out = fn_outd['data']
    fn_outs = fn_outd['summary']

    # Hoist the duplicated literals; the stampy location and reference
    # folder are fixed for this installation.
    stampy = '/home/timmonen/bin/stampy-1.0.27/stampy.py'
    folder = sample.get_data_foldername()

    # 1) build the genome index (-G), 2) build the hash table (-H),
    # 3) map both read files (-M) against it.
    sp.call([stampy,
             '--species', 'HIV',
             '--assembly', 'refseq',
             '-G', folder + 'NL43',
             folder + 'NL4-3.fasta'])
    sp.call([stampy,
             '-g', folder + 'NL43',
             '-H', folder + 'NL43'])
    sp.call([stampy,
             '-o', fn_out,
             '-g', folder + 'NL43',
             '-h', folder + 'NL43',
             '-M', fn_in[0], fn_in[1]])

    # Count mapped records.  The original derived the count from the last
    # enumerate index, which raised NameError on an empty SAM file; the
    # running counter handles that edge case (0 pairs).
    n_reads = 0
    with pysam.Samfile(fn_out, 'r') as samfile:
        for read in samfile:
            n_reads += 1

    # Write summary file (two SAM records per read pair).
    summary = {'sample name': sample.name,
               'number of read pairs': n_reads / 2}
    sample.write_json(summary, fn_outs)


# Script
if __name__ == '__main__':

    # Parse input args
    parser = argparse.ArgumentParser(description='Pre map reads')
    parser.add_argument('--sample', required=True,
                        help='MiSeq sample to analyze')
    parser.add_argument('--verbose', type=int, default=0,
                        help='Verbosity level [0-3]')
    args = parser.parse_args()

    sample = Sample(args.sample)
    # BUG FIX: call the module-level pre_map defined above.  The original
    # invoked sample.pre_map(...), which bypassed (and left dead) the
    # function this module exists to provide.
    pre_map(sample, VERBOSE=args.verbose)
timmonen/pipeline
pipeline/pre_map.py
Python
mit
1,789
[ "pysam" ]
788fe2346072dff99cf6d67961d6fa1021e19f030ccc293312be8e7bfc8c7cff
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""Tests for computing orbitals, densities and orbital angular momenta."""
import numpy as np
from unittest import TestCase

from exatomic import Universe, nwchem, molcas
from exatomic.base import resource
from exatomic.algorithms.orbital_util import compare_fields
from exatomic.algorithms.orbital import (add_molecular_orbitals,
                                         add_orb_ang_mom,
                                         add_density)


class TestMolcasOrbital(TestCase):
    """Recompute MOs, densities and angular momenta for a Molcas carbon/DZ
    run and compare the resulting fields against a pre-validated universe."""

    def setUp(self):
        # Reference universe with known-good fields; its field parameters
        # are reused so both universes are evaluated on the same grid.
        self.chk = Universe.load(resource('mol-carbon-dz-valid.hdf5'))
        kws = {'field_params': self.chk.field.loc[0], 'verbose': False}
        uni = Universe.load(resource('mol-carbon-dz.hdf5'))
        add_molecular_orbitals(uni, vector=range(5), **kws)
        add_density(uni, mocoefs='coef', **kws)
        # Orbital angular momentum built from the real/imaginary
        # coefficient sets, followed by the three spin densities.
        add_orb_ang_mom(uni, rcoefs='lreal', icoefs='limag', **kws)
        add_density(uni, mocoefs='sx', **kws)
        add_density(uni, mocoefs='sy', **kws)
        add_density(uni, mocoefs='sz', **kws)
        self.uni = uni

    def test_compare_fields(self):
        # compare_fields returns one flag per field; every field must
        # match, i.e. sum(res) == len(res) within tolerance.
        res = compare_fields(self.uni, self.chk, verbose=False)
        self.assertTrue(np.isclose(len(res), sum(res), rtol=5e-4))


class TestADFOrbital(TestCase):

    def test_compare_fields(self):
        # ADF lutetium case.  signed=False -- presumably because orbital
        # sign/phase is arbitrary between runs (see compare_fields).
        chk = Universe.load(resource('adf-lu-valid.hdf5'))
        uni = Universe.load(resource('adf-lu.hdf5'))
        uni.add_molecular_orbitals(vector=range(8, 60), verbose=False,
                                   field_params=chk.field.loc[0])
        res = compare_fields(chk, uni, signed=False, verbose=False)
        self.assertTrue(np.isclose(len(res), sum(res), rtol=5e-4))


class TestNWChemOrbital(TestCase):

    def test_compare_fields(self):
        # Cross-code check: the same CH3NH2/6-31G system computed with
        # NWChem and Molcas should yield matching orbital fields.
        nw = nwchem.Output(resource('nw-ch3nh2-631g.out')).to_universe()
        mo = molcas.Output(resource('mol-ch3nh2-631g.out'))
        mo.add_orb(resource('mol-ch3nh2-631g.scforb'))
        mo = mo.to_universe()
        nw.add_molecular_orbitals(vector=range(3, 10), verbose=False)
        mo.add_molecular_orbitals(vector=range(3, 10), verbose=False)
        res = compare_fields(nw, mo, signed=False, rtol=5e-3)
        self.assertTrue(np.isclose(sum(res), len(res), rtol=5e-3))
exa-analytics/exatomic
exatomic/algorithms/tests/test_orbital.py
Python
apache-2.0
2,362
[ "ADF", "MOLCAS", "NWChem" ]
328246f8d8b3c5cd52b3816c3d7b82de946c10ac9ccebcd8224ee1a27d1b8437
#! /usr/bin/python3 -B # *************************************************** * # This file is a part of ccROJ project * # distributed under GNU General Public License v3.0. * # Please visit the webpage: github.com/dsp-box/ccROJ * # for more information. * # contact: Krzysztof Czarnecki * # email: czarnecki.krzysiek@gmail.com * # *************************************************** */ # *************************************************** */ # import from subprocess import Popen, PIPE from linecache import getline from getopt import getopt import re, sys # *************************************************** */ # help def print_help(keys): print("draw-image.py\n\nhas possible the following options:") for k in keys: print("--", "\b"+re.sub("=", " arg", k)) print("\nexample:") print(" ./draw-image.py \\") print(" --infile data-c-rate.txt \\") print(" --zlabel 'chirp-rate (Hz/s)' \\") print(" --ylabel 'frequency (kHz)' \\") print(" --xlabel 'time (s)' \\") print(" --yfactor 0.001 \\") print(" --format png \\") print(" --min 200 \\") print(" --max 1200 \\") sys.exit(0) # *************************************************** */ # init guplot def init_gnuplot(): proc = Popen(["gnuplot","--persist"], shell=True, stdin=PIPE, stdout=None, stderr=None, close_fds=True ) def write_to_gnuplot(command): print("(debug)", command) commandn = command + "\n" proc.stdin.write(commandn.encode("UTF-8")) return write_to_gnuplot write_to_gnuplot = init_gnuplot() # *************************************************** */ # options # keys list contain possible options keys = [] # file with data to plot which should have the following format: # #X_MIN=xmin # #X_MAX=xmax # #Y_MIN=ymin # #Y_MAX=ymax # x0 y0 z00 # x0 y1 z01 # ... # x0 y9 z09 # # x1 y0 z10 # x1 y1 z11 # ... # x1 y9 z19 # ... # #Z_MIN=zmin # #Z_MAX=zmax keys.append("infile=") # name of output file (extension can be skiped) keys.append("outfile=") # file extension. 
2 are possible: png and eps keys.append("format=") # minimal and maximal values keys.extend(["min=", "max="]) keys.extend(["xmin=", "xmax="]) keys.extend(["ymin=", "ymax="]) # switch on logarithmic mode keys.append("log") # labels keys.extend(["xlabel=", "ylabel=", "zlabel="]) # factors keys.extend(["xfactor=", "yfactor=", "zfactor="]) # extra keys.append("extra=") # help keys.append("help") # *************************************************** */ # parse command line arg_format = "png" arg_infile = None arg_outfile = None arg_min = None arg_max = None arg_log = False arg_xmin = None arg_xmax = None arg_ymin = None arg_ymax = None arg_xlabel = "X" arg_ylabel = "Y" arg_zlabel = "Z" arg_xfactor = 1 arg_yfactor = 1 arg_zfactor = 1 arg_extra = "" opts, args = getopt(sys.argv[1:], "", keys) for opt,arg in opts: if opt == "--format": arg_format = arg if opt == "--infile": arg_infile = arg if opt == "--outfile": arg_outfile = arg if opt == "--min": arg_min = float(arg) if opt == "--max": arg_max = float(arg) if opt == "--log": arg_log = True if opt == "--help": print_help(keys) if opt == "--extra": arg_extra = arg if opt == "--xmin": arg_xmin = float(arg) if opt == "--xmax": arg_xmax = float(arg) if opt == "--ymin": arg_ymin = float(arg) if opt == "--ymax": arg_ymax = float(arg) if opt == "--xlabel": arg_xlabel = arg if opt == "--ylabel": arg_ylabel = arg if opt == "--zlabel": arg_zlabel = arg if opt == "--xfactor": arg_xfactor = float(arg) if opt == "--yfactor": arg_yfactor = float(arg) if opt == "--zfactor": arg_zfactor = float(arg) # *************************************************** */ # check command line write_to_gnuplot("set fontpath '/usr/share/matplotlib/mpl-data/fonts/ttf/cmr10.ttf'") if arg_format == "png": write_to_gnuplot("set term pngcairo dashed size 1600,900 font 'cmr10, 32'") elif arg_format == "eps": write_to_gnuplot("set term postscript eps enhanced color font 'cmr10, 14' size 9.5cm,7.0cm") # you can convert eps to png e.g. 
by: # convert -density 300 img-energy.eps -resize 800x600 -flatten -colorspace RGB img-energy.png else: assert False assert arg_infile != None if arg_outfile == None: arg_outfile = arg_infile if arg_outfile[-4:] == "."+arg_format: write_to_gnuplot("set output '%s'" % arg_outfile) else: write_to_gnuplot("set output '%s.%s'" % (arg_outfile, arg_format)) assert arg_min != None if arg_log: arg_max=0 assert arg_min < arg_max # *************************************************** */ # set margins def set_margins(l,r,b,t): write_to_gnuplot("set lmargin at screen %g" % l) write_to_gnuplot("set rmargin at screen %g" % r) write_to_gnuplot("set bmargin at screen %g" % b) write_to_gnuplot("set tmargin at screen %g" % t) set_margins(0.15, 0.8, 0.18, 0.955) # *************************************************** */ # const settings write_to_gnuplot("set pm3d map") write_to_gnuplot("unset grid") # *************************************************** */ # ranges if arg_xmin != None: xmin = arg_xmin else: line = getline(arg_infile, 1) xmin = float(re.findall("(?<=X_MIN=).*", line)[0]) if arg_xmax != None: xmax = arg_xmax else: line = getline(arg_infile, 2) xmax = float(re.findall("(?<=X_MAX=).*", line)[0]) write_to_gnuplot("set xrange [%g : %g]" % (xmin*arg_xfactor, xmax*arg_xfactor)) if arg_ymin != None: ymin = arg_ymin else: line = getline(arg_infile, 3) ymin = float(re.findall("(?<=Y_MIN=).*", line)[0]) if arg_ymax != None: ymax = arg_ymax else: line = getline(arg_infile, 4) ymax = float(re.findall("(?<=Y_MAX=).*", line)[0]) write_to_gnuplot("set yrange [%g : %g]" % (ymin*arg_yfactor, ymax*arg_yfactor)) write_to_gnuplot("set cbrange [%g : %g]" % (arg_min*arg_zfactor, arg_max*arg_zfactor)) if arg_log: ln = sum(1 for line in open(arg_infile)) line = getline(arg_infile, ln) log_max = float(re.findall("(?<=Z_MAX=).*", line)[0]) # *************************************************** */ # palette roj_palette = """\ 0.0 1.0 1.0 1.0, \ 0.5 0.9 0.0 0.0, \ 2.5 1.0 0.2 0.0, \ 3.3 0.9 0.8 0.0, \ 
3.9 0.3 1.0 0.0, \ 4.0 0.0 1.0 0.0, \ 4.15 0.0 1.0 0.4, \ 4.8 0.0 0.5 1.0, \ 5.5 0.0 0.2 1.0, \ 7.5 0.0 0.1 0.8, \ 8.0 1.0 1.0 1.0""" if arg_log: write_to_gnuplot("set palette rgbformulae 30,31,32 negative") else: write_to_gnuplot("set palette defined(%s) negative" % roj_palette) # *************************************************** */ # labels write_to_gnuplot("set xlabel '%s'" % arg_xlabel) write_to_gnuplot("set ylabel '%s' offset -1,0" % arg_ylabel) write_to_gnuplot("set cblabel '%s' offset 1,0" % arg_zlabel) # *************************************************** */ # extra if arg_extra: write_to_gnuplot(arg_extra) # *************************************************** */ # ploting if arg_log: write_to_gnuplot("splot '%s' u (%g*$1):(%g*$2):(10*log10($3/%g)) notitle" % (arg_infile, arg_xfactor, arg_yfactor, log_max)) else: write_to_gnuplot("splot '%s' u (%g*$1):(%g*$2):(%g*$3) notitle" % (arg_infile, arg_xfactor, arg_yfactor, arg_zfactor)) write_to_gnuplot("print 'done!'") print("(info) The process goes down to the background. Wait a moment for results. It can takes few minutes...")
dsp-box/ccROJ
manual/draw-image.py
Python
gpl-3.0
7,574
[ "VisIt" ]
efd8ad8d652dbcffb17de0f53bdce67906d11b82b1b0677effb3dc5bf24e1b9d
from openmmtools import hmc_integrators, testsystems from simtk.openmm import app import simtk.openmm as mm from simtk import unit as u timestep = 1.0 * u.femtoseconds target_length = 200 * u.nanoseconds steps_per_hmc = 12 n_steps = int(target_length / timestep) output_frequency = 20 #barostat_frequency = 4 barostat_frequency = 1 print(n_steps, output_frequency, barostat_frequency) collision_rate = 1.0 / u.picoseconds temperature = 300. * u.kelvin pressure = 1.0 * u.atmospheres cutoff = 0.95 * u.nanometers prmtop_filename = "./input/126492-54-4_1000_300.6.prmtop" # cp ~/src/kyleabeauchamp/LiquidBenchmark/liquid_benchmark_3_14/tleap/126492-54-4_1000_300.6.prmtop ./ pdb_filename = "./input/126492-54-4_1000_300.6_equil.pdb" # cp ~/src/kyleabeauchamp/LiquidBenchmark/liquid_benchmark_3_14/equil/126492-54-4_1000_300.6_equil.pdb ./ log_filename = "./production/production_%0.2f.log" % (timestep / u.femtoseconds) pdb = app.PDBFile(pdb_filename) prmtop = app.AmberPrmtopFile(prmtop_filename) system = prmtop.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds) #integrator = hmc_integrators.GHMC2(temperature, steps_per_hmc, timestep, collision_rate) integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, timestep) #system.addForce(mm.MonteCarloBarostat(pressure, temperature, barostat_frequency)) simulation = app.Simulation(prmtop.topology, system, integrator) simulation.context.setPositions(pdb.positions) simulation.context.setPeriodicBoxVectors(*pdb.topology.getPeriodicBoxVectors()) simulation.context.setVelocitiesToTemperature(temperature) simulation.step(100) simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), output_frequency, step=True, time=True, speed=True, density=True, potentialEnergy=True, kineticEnergy=True)) #simulation.step(n_steps) simulation.step(1000)
kyleabeauchamp/HMCNotes
code/old/production_density_amber.py
Python
gpl-2.0
1,873
[ "OpenMM" ]
24eefe68f040aff9a8b15f9ff3b7277196ea5bc74247023f03fc8ccff89e53e0
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Unit tests for the Pipeline class.""" import logging import platform import unittest # TODO(BEAM-1555): Test is failing on the service, with FakeSource. # from nose.plugins.attrib import attr import apache_beam as beam from apache_beam.io import Read from apache_beam.metrics import Metrics from apache_beam.pipeline import Pipeline from apache_beam.pipeline import PipelineOptions from apache_beam.pipeline import PipelineVisitor from apache_beam.pvalue import AsSingleton from apache_beam.runners.dataflow.native_io.iobase import NativeSource from apache_beam.testing.test_pipeline import TestPipeline from apache_beam.testing.util import assert_that from apache_beam.testing.util import equal_to from apache_beam.transforms import CombineGlobally from apache_beam.transforms import Create from apache_beam.transforms import FlatMap from apache_beam.transforms import Map from apache_beam.transforms import DoFn from apache_beam.transforms import ParDo from apache_beam.transforms import PTransform from apache_beam.transforms import WindowInto from apache_beam.transforms.window import SlidingWindows from apache_beam.transforms.window import TimestampedValue from apache_beam.utils.timestamp import MIN_TIMESTAMP 
class FakeSource(NativeSource): """Fake source returning a fixed list of values.""" class _Reader(object): def __init__(self, vals): self._vals = vals self._output_counter = Metrics.counter('main', 'outputs') def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): pass def __iter__(self): for v in self._vals: self._output_counter.inc() yield v def __init__(self, vals): self._vals = vals def reader(self): return FakeSource._Reader(self._vals) class PipelineTest(unittest.TestCase): @staticmethod def custom_callable(pcoll): return pcoll | '+1' >> FlatMap(lambda x: [x + 1]) # Some of these tests designate a runner by name, others supply a runner. # This variation is just to verify that both means of runner specification # work and is not related to other aspects of the tests. class CustomTransform(PTransform): def expand(self, pcoll): return pcoll | '+1' >> FlatMap(lambda x: [x + 1]) class Visitor(PipelineVisitor): def __init__(self, visited): self.visited = visited self.enter_composite = [] self.leave_composite = [] def visit_value(self, value, _): self.visited.append(value) def enter_composite_transform(self, transform_node): self.enter_composite.append(transform_node) def leave_composite_transform(self, transform_node): self.leave_composite.append(transform_node) def test_create(self): pipeline = TestPipeline() pcoll = pipeline | 'label1' >> Create([1, 2, 3]) assert_that(pcoll, equal_to([1, 2, 3])) # Test if initial value is an iterator object. 
pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6))) pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10]) assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3') pipeline.run() def test_flatmap_builtin(self): pipeline = TestPipeline() pcoll = pipeline | 'label1' >> Create([1, 2, 3]) assert_that(pcoll, equal_to([1, 2, 3])) pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10]) assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2') pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12]) assert_that(pcoll3, equal_to([[11, 12], [12, 12], [13, 12]]), label='pcoll3') pcoll4 = pcoll3 | 'do2' >> FlatMap(set) assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4') pipeline.run() def test_create_singleton_pcollection(self): pipeline = TestPipeline() pcoll = pipeline | 'label' >> Create([[1, 2, 3]]) assert_that(pcoll, equal_to([[1, 2, 3]])) pipeline.run() # TODO(BEAM-1555): Test is failing on the service, with FakeSource. # @attr('ValidatesRunner') def test_metrics_in_source(self): pipeline = TestPipeline() pcoll = pipeline | Read(FakeSource([1, 2, 3, 4, 5, 6])) assert_that(pcoll, equal_to([1, 2, 3, 4, 5, 6])) res = pipeline.run() metric_results = res.metrics().query() outputs_counter = metric_results['counters'][0] self.assertEqual(outputs_counter.key.step, 'Read') self.assertEqual(outputs_counter.key.metric.name, 'outputs') self.assertEqual(outputs_counter.committed, 6) def test_read(self): pipeline = TestPipeline() pcoll = pipeline | 'read' >> Read(FakeSource([1, 2, 3])) assert_that(pcoll, equal_to([1, 2, 3])) pipeline.run() def test_visit_entire_graph(self): pipeline = Pipeline() pcoll1 = pipeline | 'pcoll' >> Create([1, 2, 3]) pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1]) pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1]) pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1]) transform = PipelineTest.CustomTransform() pcoll5 = pcoll4 | transform visitor = PipelineTest.Visitor(visited=[]) pipeline.visit(visitor) self.assertEqual(set([pcoll1, 
pcoll2, pcoll3, pcoll4, pcoll5]), set(visitor.visited)) self.assertEqual(set(visitor.enter_composite), set(visitor.leave_composite)) self.assertEqual(3, len(visitor.enter_composite)) self.assertEqual(visitor.enter_composite[2].transform, transform) self.assertEqual(visitor.leave_composite[1].transform, transform) def test_apply_custom_transform(self): pipeline = TestPipeline() pcoll = pipeline | 'pcoll' >> Create([1, 2, 3]) result = pcoll | PipelineTest.CustomTransform() assert_that(result, equal_to([2, 3, 4])) pipeline.run() def test_reuse_custom_transform_instance(self): pipeline = Pipeline() pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3]) pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6]) transform = PipelineTest.CustomTransform() pcoll1 | transform with self.assertRaises(RuntimeError) as cm: pipeline.apply(transform, pcoll2) self.assertEqual( cm.exception.message, 'Transform "CustomTransform" does not have a stable unique label. ' 'This will prevent updating of pipelines. ' 'To apply a transform with a specified label write ' 'pvalue | "label" >> transform') def test_reuse_cloned_custom_transform_instance(self): pipeline = TestPipeline() pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3]) pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6]) transform = PipelineTest.CustomTransform() result1 = pcoll1 | transform result2 = pcoll2 | 'new_label' >> transform assert_that(result1, equal_to([2, 3, 4]), label='r1') assert_that(result2, equal_to([5, 6, 7]), label='r2') pipeline.run() def test_transform_no_super_init(self): class AddSuffix(PTransform): def __init__(self, suffix): # No call to super(...).__init__ self.suffix = suffix def expand(self, pcoll): return pcoll | Map(lambda x: x + self.suffix) self.assertEqual( ['a-x', 'b-x', 'c-x'], sorted(['a', 'b', 'c'] | 'AddSuffix' >> AddSuffix('-x'))) def test_memory_usage(self): try: import resource except ImportError: # Skip the test if resource module is not available (e.g. non-Unix os). 
self.skipTest('resource module not available.') if platform.mac_ver()[0]: # Skip the test on macos, depending on version it returns ru_maxrss in # different units. self.skipTest('ru_maxrss is not in standard units.') def get_memory_usage_in_bytes(): return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2 ** 10) def check_memory(value, memory_threshold): memory_usage = get_memory_usage_in_bytes() if memory_usage > memory_threshold: raise RuntimeError( 'High memory usage: %d > %d' % (memory_usage, memory_threshold)) return value len_elements = 1000000 num_elements = 10 num_maps = 100 pipeline = TestPipeline() # Consumed memory should not be proportional to the number of maps. memory_threshold = ( get_memory_usage_in_bytes() + (5 * len_elements * num_elements)) # Plus small additional slack for memory fluctuations during the test. memory_threshold += 10 * (2 ** 20) biglist = pipeline | 'oom:create' >> Create( ['x' * len_elements] * num_elements) for i in range(num_maps): biglist = biglist | ('oom:addone-%d' % i) >> Map(lambda x: x + 'y') result = biglist | 'oom:check' >> Map(check_memory, memory_threshold) assert_that(result, equal_to( ['x' * len_elements + 'y' * num_maps] * num_elements)) pipeline.run() def test_aggregator_empty_input(self): actual = [] | CombineGlobally(max).without_defaults() self.assertEqual(actual, []) def test_pipeline_as_context(self): def raise_exception(exn): raise exn with self.assertRaises(ValueError): with Pipeline() as p: # pylint: disable=expression-not-assigned p | Create([ValueError('msg')]) | Map(raise_exception) # TODO(BEAM-1894). 
# def test_eager_pipeline(self): # p = Pipeline('EagerRunner') # self.assertEqual([1, 4, 9], p | Create([1, 2, 3]) | Map(lambda x: x*x)) class DoFnTest(unittest.TestCase): def test_element(self): class TestDoFn(DoFn): def process(self, element): yield element + 10 pipeline = TestPipeline() pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn()) assert_that(pcoll, equal_to([11, 12])) pipeline.run() def test_side_input_no_tag(self): class TestDoFn(DoFn): def process(self, element, prefix, suffix): return ['%s-%s-%s' % (prefix, element, suffix)] pipeline = TestPipeline() words_list = ['aa', 'bb', 'cc'] words = pipeline | 'SomeWords' >> Create(words_list) prefix = 'zyx' suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in result = words | 'DecorateWordsDoFnNoTag' >> ParDo( TestDoFn(), prefix, suffix=AsSingleton(suffix)) assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list])) pipeline.run() def test_side_input_tagged(self): class TestDoFn(DoFn): def process(self, element, prefix, suffix=DoFn.SideInputParam): return ['%s-%s-%s' % (prefix, element, suffix)] pipeline = TestPipeline() words_list = ['aa', 'bb', 'cc'] words = pipeline | 'SomeWords' >> Create(words_list) prefix = 'zyx' suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in result = words | 'DecorateWordsDoFnNoTag' >> ParDo( TestDoFn(), prefix, suffix=AsSingleton(suffix)) assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list])) pipeline.run() def test_window_param(self): class TestDoFn(DoFn): def process(self, element, window=DoFn.WindowParam): yield (element, (float(window.start), float(window.end))) pipeline = TestPipeline() pcoll = (pipeline | Create([1, 7]) | Map(lambda x: TimestampedValue(x, x)) | WindowInto(windowfn=SlidingWindows(10, 5)) | ParDo(TestDoFn())) assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)), (7, (0, 10)), (7, (5, 15))])) pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn()) assert_that( pcoll2, equal_to([ ((1, (-5, 5)), (-5, 5)), 
((1, (0, 10)), (0, 10)), ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5, 15))]), label='doubled windows') pipeline.run() def test_timestamp_param(self): class TestDoFn(DoFn): def process(self, element, timestamp=DoFn.TimestampParam): yield timestamp pipeline = TestPipeline() pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn()) assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP])) pipeline.run() class Bacon(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument('--slices', type=int) class Eggs(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument('--style', default='scrambled') class Breakfast(Bacon, Eggs): pass class PipelineOptionsTest(unittest.TestCase): def test_flag_parsing(self): options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored']) self.assertEquals(3, options.slices) self.assertEquals('sunny side up', options.style) def test_keyword_parsing(self): options = Breakfast( ['--slices=3', '--style=sunny side up', '--ignored'], slices=10) self.assertEquals(10, options.slices) self.assertEquals('sunny side up', options.style) def test_attribute_setting(self): options = Breakfast(slices=10) self.assertEquals(10, options.slices) options.slices = 20 self.assertEquals(20, options.slices) def test_view_as(self): generic_options = PipelineOptions(['--slices=3']) self.assertEquals(3, generic_options.view_as(Bacon).slices) self.assertEquals(3, generic_options.view_as(Breakfast).slices) generic_options.view_as(Breakfast).slices = 10 self.assertEquals(10, generic_options.view_as(Bacon).slices) with self.assertRaises(AttributeError): generic_options.slices # pylint: disable=pointless-statement with self.assertRaises(AttributeError): generic_options.view_as(Eggs).slices # pylint: disable=expression-not-assigned def test_defaults(self): options = Breakfast(['--slices=3']) self.assertEquals(3, options.slices) self.assertEquals('scrambled', options.style) def 
test_dir(self): options = Breakfast() self.assertEquals( set(['from_dictionary', 'get_all_options', 'slices', 'style', 'view_as', 'display_data']), set([attr for attr in dir(options) if not attr.startswith('_')])) self.assertEquals( set(['from_dictionary', 'get_all_options', 'style', 'view_as', 'display_data']), set([attr for attr in dir(options.view_as(Eggs)) if not attr.startswith('_')])) class RunnerApiTest(unittest.TestCase): def test_simple(self): """Tests serializing, deserializing, and running a simple pipeline. More extensive tests are done at pipeline.run for each suitable test. """ p = beam.Pipeline() p | beam.Create([None]) | beam.Map(lambda x: x) # pylint: disable=expression-not-assigned proto = p.to_runner_api() p2 = Pipeline.from_runner_api(proto, p.runner, p._options) p2.run() if __name__ == '__main__': logging.getLogger().setLevel(logging.DEBUG) unittest.main()
manuzhang/incubator-beam
sdks/python/apache_beam/pipeline_test.py
Python
apache-2.0
15,533
[ "VisIt" ]
a58ec043225fa3f3bc75de8299f82ce79bacad4da9a32f57a963844df06e9349
adjectives = [ 'homely', 'rough', 'yellow', 'dry', 'deafening', 'substantial', 'zesty', 'watchful', 'kooky', 'small', 'grown', 'closed', 'offbeat', 'variable', 'specific', 'novel', 'marvelous', 'private', 'bony', 'best', 'hospitable', 'stupid', 'flickering', 'pesky', 'sane', 'organic', 'peppery', 'pink', 'original', 'handmade', 'devoted', 'mealy', 'aromatic', 'oily', 'growing', 'scary', 'essential', 'fluffy', 'flimsy', 'smoggy', 'runny', 'rosy', 'far', 'knowing', 'definitive', 'mature', 'realistic', 'Spanish', 'hairy', 'serious', 'potable', 'competent', 'attentive', 'frilly', 'authorized', 'round', 'awesome', 'bruised', 'wide', 'worthless', 'some', 'kosher', 'polished', 'scholarly', 'pitiful', 'generous', 'big', 'unlucky', 'jealous', 'valid', 'vibrant', 'icky', 'surprised', 'arid', 'humble', 'absolute', 'weird', 'glamorous', 'trim', 'dear', 'pristine', 'plain', 'astonishing', 'remorseful', 'noisy', 'our', 'right', 'basic', 'avaricious', 'deficient', 'brief', 'dishonest', 'eminent', 'new', 'bountiful', 'rare', 'misguided', 'bogus', 'timely', 'jovial', 'curvy', 'delirious', 'usable', 'sunny', 'quiet', 'belated', 'tart', 'shoddy', 'worse', 'feline', 'thankful', 'harmonious', 'lovely', 'infatuated', 'bare', 'powerful', 'snoopy', 'lame', 'illiterate', 'unwieldy', 'baggy', 'virtual', 'sour', 'pricey', 'punctual', 'turbulent', 'classic', 'happy', 'luxurious', 'firm', 'creepy', 'front', 'calm', 'grave', 'comfortable', 'internal', 'serene', 'scared', 'anchored', 'straight', 'cultivated', 'spiteful', 'odd', 'sizzling', 'present', 'criminal', 'neglected', 'sharp', 'vigorous', 'frivolous', 'soft', 'vengeful', 'humming', 'whispered', 'parched', 'sweaty', 'enlightened', 'shiny', 'assured', 'colorless', 'brave', 'rotten', 'optimal', 'vicious', 'slushy', 'edible', 'metallic', 'mindless', 'female', 'cheap', 'rusty', 'admired', 'whopping', 'imperturbable', 'failing', 'lanky', 'sneaky', 'elderly', 'neat', 'apprehensive', 'advanced', 'suburban', 'cheery', 'dim', 'onerlooked', 
'understated', 'repentant', 'satisfied', 'smug', 'bitter', 'responsible', 'back', 'portly', 'active', 'heartfelt', 'sweltering', 'athletic', 'amused', 'which', 'harmless', 'crazy', 'massive', 'cloudy', 'immaculate', 'dirty', 'careful', 'overcooked', 'misty', 'ashamed', 'each', 'similar', 'piercing', 'sympathetic', 'heavenly', 'soupy', 'several', 'lonely', 'black', 'adorable', 'glittering', 'icy', 'speedy', 'vain', 'prudent', 'ironclad', 'mellow', 'quixotic', 'strong', 'spotless', 'upright', 'narrow', 'bustling', 'direct', 'weary', 'fuzzy', 'tepid', 'disguised', 'bronze', 'snarling', 'practical', 'proper', 'brisk', 'upbeat', 'unlined', 'downright', 'ethical', 'incompatible', 'tidy', 'single', 'unfit', 'amusing', 'probable', 'pushy', 'melodic', 'instructive', 'grotesque', 'bumpy', 'sparkling', 'sophisticated', 'funny', 'jaunty', 'grumpy', 'ill', 'inconsequential', 'real', 'whimsical', 'outstanding', 'gaseous', 'breakable', 'bewitched', 'moist', 'nervous', 'digital', 'wasteful', 'sociable', 'wilted', 'purple', 'little', 'mundane', 'French', 'bold', 'suspicious', 'handy', 'jumbo', 'insidious', 'meager', 'haunting', 'silent', 'idle', 'expensive', 'uncomfortable', 'writhing', 'impossible', 'capital', 'helpful', 'overdue', 'accurate', 'elated', 'spry', 'lumpy', 'decent', 'tame', 'live', 'villainous', 'separate', 'ornate', 'miserable', 'raw', 'incomplete', 'sentimental', 'high', 'equal', 'worst', 'puzzled', 'weekly', 'slim', 'burdensome', 'incomparable', 'left', 'gentle', 'cooked', 'natural', 'definite', 'imaginary', 'gleaming', 'esteemed', 'unacceptable', 'blue', 'whole', 'frightened', 'busy', 'nutritious', 'worthy', 'square', 'waterlogged', 'stimulating', 'late', 'ample', 'mountainous', 'untrue', 'euphoric', 'sturdy', 'elliptical', 'pale', 'complicated', 'lovable', 'forsaken', 'legal', 'inborn', 'weighty', 'muddy', 'simple', 'tempting', 'fumbling', 'wee', 'willing', 'another', 'limited', 'worthwhile', 'voluminous', 'witty', 'official', 'disloyal', 'open', 'feminine', 
'complete', 'musty', 'delightful', 'hopeful', 'flamboyant', 'victorious', 'entire', 'occasional', 'alarmed', 'infinite', 'jolly', 'unrealistic', 'limp', 'superb', 'granular', 'complex', 'tedious', 'bowed', 'staid', 'livid', 'buttery', 'heavy', 'productive', 'super', 'needy', 'pointless', 'thin', 'wonderful', 'qualified', 'venerated', 'compassionate', 'vivacious', 'cluttered', 'faint', 'triangular', 'anguished', 'exotic', 'obvious', 'loyal', 'limping', 'powerless', 'different', 'conventional', 'common', 'mad', 'studious', 'pungent', 'unknown', 'fat', 'worried', 'hidden', 'wiggly', 'content', 'kaleidoscopic', 'alive', 'daring', 'good', 'nimble', 'disastrous', 'informal', 'spherical', 'puzzling', 'glistening', 'courteous', 'miserly', 'deep', 'worrisome', 'muted', 'fair', 'near', 'deserted', 'yawning', 'guilty', 'drab', 'shimmering', 'damaged', 'frosty', 'exalted', 'lavish', 'darling', 'itchy', 'urban', 'immediate', 'enraged', 'functional', 'monthly', 'identical', 'secret', 'delectable', 'untimely', 'harmful', 'close', 'short', 'energetic', 'uncommon', 'past', 'fine', 'firsthand', 'obese', 'illustrious', 'fussy', 'bossy', 'ruddy', 'phony', 'yummy', 'international', 'passionate', 'klutzy', 'hungry', 'reflecting', 'blissful', 'blind', 'required', 'svelte', 'eager', 'bland', 'hilarious', 'merry', 'dismal', 'crowded', 'wavy', 'zigzag', 'disfigured', 'shabby', 'enchanted', 'lasting', 'mushy', 'tense', 'any', 'petty', 'automatic', 'woozy', 'gray', 'medical', 'sarcastic', 'conscious', 'gifted', 'focused', 'wordy', 'wry', 'optimistic', 'unsteady', 'last', 'actual', 'first', 'horrible', 'gleeful', 'poised', 'young', 'hurtful', 'attached', 'healthy', 'tough', 'honest', 'blaring', 'flashy', 'poor', 'opulent', 'dapper', 'experienced', 'testy', 'gracious', 'bubbly', 'formal', 'wild', 'delicious', 'impassioned', 'aged', 'acidic', 'deadly', 'honored', 'previous', 'calculating', 'pretty', 'loving', 'average', 'perky', 'gloomy', 'improbable', 'next', 'vague', 'polite', 'enormous', 
'scarce', 'costly', 'embellished', 'traumatic', 'shadowy', 'medium', 'sardonic', 'idealistic', 'slippery', 'grizzled', 'better', 'scrawny', 'remote', 'submissive', 'intrepid', 'personal', 'trivial', 'friendly', 'few', 'shady', 'glaring', 'arctic', 'simplistic', 'thirsty', 'wrong', 'untried', 'sick', 'warm', 'numb', 'boring', 'ecstatic', 'rash', 'utilized', 'precious', 'free', 'stunning', 'irresponsible', 'minor', 'tan', 'idiotic', 'recent', 'ragged', 'immense', 'liquid', 'emotional', 'lively', 'querulous', 'vacant', 'favorite', 'steep', 'mediocre', 'harsh', 'false', 'corrupt', 'rewarding', 'wicked', 'curly', 'dreary', 'biodegradable', 'keen', 'knobby', 'fresh', 'knowledgeable', 'supportive', 'majestic', 'gargantuan', 'imperfect', 'snappy', 'burly', 'frayed', 'aggravating', 'leafy', 'adored', 'pessimistic', 'clean', 'indelible', 'excited', 'parallel', 'outrageous', 'jumpy', 'steel', 'distant', 'sugary', 'periodic', 'hateful', 'blushing', 'soggy', 'ultimate', 'tiny', 'hideous', 'useful', 'beneficial', 'nippy', 'mortified', 'austere', 'dearest', 'acrobatic', 'wiry', 'innocent', 'sinful', 'inferior', 'beautiful', 'crafty', 'windy', 'concrete', 'nice', 'half', 'accomplished', 'clumsy', 'familiar', 'menacing', 'unfortunate', 'reasonable', 'smooth', 'worn', 'tricky', 'spirited', 'greedy', 'bulky', 'candid', 'creative', 'multicolored', 'cool', 'monumental', 'nonstop', 'profitable', 'double', 'fearful', 'robust', 'ancient', 'valuable', 'idolized', 'sad', 'radiant', 'kindhearted', 'knotty', 'unhappy', 'old', 'plastic', 'rapid', 'utter', 'awful', 'scratchy', 'male', 'intentional', 'flustered', 'wealthy', 'zealous', 'foolhardy', 'illegal', 'stable', 'unwilling', 'only', 'yearly', 'angry', 'hasty', 'positive', 'wretched', 'mean', 'threadbare', 'useless', 'squiggly', 'slimy', 'husky', 'jagged', 'velvety', 'unselfish', 'nifty', 'lost', 'mixed', 'diligent', 'unimportant', 'somber', 'showy', 'imaginative', 'paltry', 'elaborate', 'chubby', 'zany', 'starchy', 'composed', 'peaceful', 
'wise', 'leading', 'orderly', 'cultured', 'starry', 'faraway', 'thorough', 'standard', 'weepy', 'rude', 'angelic', 'safe', 'youthful', 'charming', 'grouchy', 'rural', 'aware', 'enchanting', 'academic', 'adorable', 'abandoned', 'monstrous', 'prize', 'quintessential', 'thoughtful', 'fragrant', 'chief', 'juvenile', 'hard', 'detailed', 'alienated', 'orange', 'alert', 'proud', 'tremendous', 'cumbersome', 'woeful', 'hollow', 'trained', 'unsightly', 'subtle', 'cute', 'glossy', 'untidy', 'soulful', 'wrathful', 'critical', 'impish', 'overjoyed', 'subdued', 'incredible', 'cooperative', 'trusting', 'uniform', 'wooden', 'tasty', 'finished', 'spicy', 'partial', 'unruly', 'gullible', 'antique', 'palatable', 'vast', 'unnatural', 'crisp', 'striped', 'apt', 'extroverted', 'mild', 'clever', 'rectangular', 'thrifty', 'winding', 'able', 'clueless', 'early', 'flowery', 'fortunate', 'rigid', 'prime', 'babyish', 'strict', 'growling', 'perfumed', 'frugal', 'stiff', 'every', 'amazing', 'important', 'circular', 'rich', 'celebrated', 'interesting', 'green', 'tragic', 'dependable', 'likable', 'difficult', 'smart', 'fake', 'brilliant', 'plaintive', 'defiant', 'honorable', 'lawful', 'great', 'cylindrical', 'frozen', 'unwitting', 'giving', 'nasty', 'unpleasant', 'exhausted', 'buzzing', 'agreeable', 'married', 'delayed', 'ideal', 'major', 'discrete', 'lucky', 'low', 'frizzy', 'troubled', 'glorious', 'dark', 'unusual', 'adept', 'expert', 'even', 'watery', 'negligible', 'mammoth', 'dreary', 'filthy', 'unkempt', 'motionless', 'that', 'stale', 'unique', 'fatal', 'oval', 'rubbery', 'warlike', 'altruistic', 'creamy', 'unaware', 'lumbering', 'upset', 'impolite', 'offensive', 'light', 'famous', 'animated', 'normal', 'respectful', 'fantastic', 'gross', 'envious', 'chilly', 'joint', 'fickle', 'large', 'royal', 'unhealthy', 'superior', 'dual', 'gruesome', 'silky', 'insignificant', 'trifling', 'other', 'exemplary', 'grubby', 'distant', 'torn', 'prickly', 'pastel', 'wet', 'courageous', 'pertinent', 'barren', 
'loose', 'confused', 'thunderous', 'pleasing', 'junior', 'welcome', 'kind', 'insecure', 'fancy', 'dense', 'agitated', 'unfolded', 'crooked', 'ugly', 'trustworthy', 'putrid', 'selfish', 'sweet', 'neighboring', 'unwritten', 'electric', 'dependent', 'used', 'lean', 'queasy', 'ringed', 'tubby', 'milky', 'ripe', 'true', 'truthful', 'handsome', 'infamous', 'questionable', 'luminous', 'gummy', 'genuine', 'fearless', 'flaky', 'obedient', 'quick', 'oddball', 'golden', 'oblong', 'revolving', 'meek', 'noxious', 'acclaimed', 'frail', 'decimal', 'quaint', 'fixed', 'webbed', 'uneven', 'intelligent', 'elegant', 'outlandish', 'grimy', 'regal', 'stingy', 'fluid', 'weak' ] animals = [ 'Abyssinian', 'Affenpinscher', 'Akbash', 'Akita', 'Albatross', 'Alligator', 'Angelfish', 'Ant', 'Anteater', 'Antelope', 'Armadillo', 'Avocet', 'Axolotl', 'Baboon', 'Badger', 'Balinese', 'Bandicoot', 'Barb', 'Barnacle', 'Barracuda', 'Bat', 'Beagle', 'Bear', 'Beaver', 'Beetle', 'Binturong', 'Bird', 'Birman', 'Bison', 'Bloodhound', 'Bobcat', 'Bombay', 'Bongo', 'Bonobo', 'Booby', 'Budgerigar', 'Buffalo', 'Bulldog', 'Bullfrog', 'Burmese', 'Butterfly', 'Caiman', 'Camel', 'Capybara', 'Caracal', 'Cassowary', 'Cat', 'Caterpillar', 'Catfish', 'Centipede', 'Chameleon', 'Chamois', 'Cheetah', 'Chicken', 'Chihuahua', 'Chimpanzee', 'Chinchilla', 'Chinook', 'Chipmunk', 'Cichlid', 'Coati', 'Cockroach', 'Collie', 'Coral', 'Cougar', 'Cow', 'Coyote', 'Crab', 'Crane', 'Crocodile', 'Cuscus', 'Cuttlefish', 'Dachshund', 'Dalmatian', 'Deer', 'Dhole', 'Dingo', 'Discus', 'Dodo', 'Dog', 'Dolphin', 'Donkey', 'Dormouse', 'Dragonfly', 'Drever', 'Duck', 'Dugong', 'Dunker', 'Eagle', 'Earwig', 'Echidna', 'Elephant', 'Emu', 'Falcon', 'Ferret', 'Fish', 'Flamingo', 'Flounder', 'Fly', 'Fossa', 'Fox', 'Frigatebird', 'Frog', 'Gar', 'Gecko', 'Gerbil', 'Gharial', 'Gibbon', 'Giraffe', 'Goat', 'Goose', 'Gopher', 'Gorilla', 'Grasshopper', 'Greyhound', 'Grouse', 'Guppy', 'Hamster', 'Hare', 'Harrier', 'Havanese', 'Hedgehog', 'Heron', 'Himalayan', 
'Hippopotamus', 'Horse', 'Human', 'Hummingbird', 'Hyena', 'Ibis', 'Iguana', 'Impala', 'Indri', 'Insect', 'Jackal', 'Jaguar', 'Javanese', 'Jellyfish', 'Kakapo', 'Kangaroo', 'Kingfisher', 'Kiwi', 'Koala', 'Kudu', 'Labradoodle', 'Ladybird', 'Lemming', 'Lemur', 'Leopard', 'Liger', 'Lion', 'Lionfish', 'Lizard', 'Llama', 'Lobster', 'Lynx', 'Macaw', 'Magpie', 'Maltese', 'Manatee', 'Mandrill', 'Markhor', 'Mastiff', 'Mayfly', 'Meerkat', 'Millipede', 'Mole', 'Molly', 'Mongoose', 'Mongrel', 'Monkey', 'Moorhen', 'Moose', 'Moth', 'Mouse', 'Mule', 'Neanderthal', 'Newfoundland', 'Newt', 'Nightingale', 'Numbat', 'Ocelot', 'Octopus', 'Okapi', 'Olm', 'Opossum', 'Ostrich', 'Otter', 'Oyster', 'Pademelon', 'Panda', 'Panther', 'Parrot', 'Peacock', 'Pekingese', 'Pelican', 'Penguin', 'Persian', 'Pheasant', 'Pig', 'Pika', 'Pike', 'Piranha', 'Platypus', 'Pointer', 'Poodle', 'Porcupine', 'Possum', 'Prawn', 'Puffin', 'Pug', 'Puma', 'Quail', 'Quetzal', 'Quokka', 'Quoll', 'Rabbit', 'Raccoon', 'Ragdoll', 'Rat', 'Rattlesnake', 'Reindeer', 'Rhinoceros', 'Robin', 'Rottweiler', 'Salamander', 'Saola', 'Scorpion', 'Seahorse', 'Seal', 'Serval', 'Sheep', 'Shrimp', 'Siamese', 'Siberian', 'Skunk', 'Sloth', 'Snail', 'Snake', 'Snowshoe', 'Somali', 'Sparrow', 'Sponge', 'Squid', 'Squirrel', 'Starfish', 'Stingray', 'Stoat', 'Swan', 'Tang', 'Tapir', 'Tarsier', 'Termite', 'Tetra', 'Tiffany', 'Tiger', 'Tortoise', 'Toucan', 'Tropicbird', 'Tuatara', 'Turkey', 'Uakari', 'Uguisu', 'Umbrellabird', 'Vulture', 'Wallaby', 'Walrus', 'Warthog', 'Wasp', 'Weasel', 'Whippet', 'Wildebeest', 'Wolf', 'Wolverine', 'Wombat', 'Woodlouse', 'Woodpecker', 'Wrasse', 'Yak', 'Zebra', 'Zebu', 'Zonkey', 'Zorse' ]
MatthewJWalls/nomenclature
names.py
Python
mit
18,679
[ "Jaguar", "MOOSE", "Octopus" ]
dfbde6dbc7403ce2e8c827145f945f08ccf6cfe2a3b07f44c8411b71a8bba523
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2020 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type import os import semver import sys from .lib.common import BASE_DIR from invoke import task try: from jinja2 import Environment from jinja2 import FileSystemLoader HAS_JINJA = True except ImportError: HAS_JINJA = False if HAS_JINJA: JINJA_ENV = Environment( loader=FileSystemLoader(BASE_DIR + '/devtools/stubs') ) BUILD_DIR = '{0}/builds'.format(BASE_DIR) HELP = dict( filename="Name of the tarball file to be published found in builds directory.", api_key="The api key from Ansible Galaxy to be used to upload", ah="Indicates if we are publishing AH." ) def update_galaxy_file(version, collection): # updates galaxy.yml file for collection update galaxy_file = '{0}/ansible_collections/f5networks/{1}/galaxy.yml'.format(BASE_DIR, collection) template = JINJA_ENV.get_template('collection_galaxy_{0}.yml'.format(collection)) content = template.render(version=version, collection=collection) fh = open(galaxy_file, 'w') fh.write(content) fh.close() def validate_version(version): try: semver.parse_version_info(version) except ValueError as ex: print(str(ex)) sys.exit(1) @task(name='change-version', optional=['collection'], help=dict( version="What version of collection to set in the galaxy.yml file, the version must follow in SemVer format.", collection="The collection name for which the galaxy.yml file to be changed, DEFAULT: 'f5_modules'.", )) def change_galaxy_version(c, version, collection='f5_modules'): """Changes version of the collection in galaxy.yml file.""" validate_version(version) update_galaxy_file(version, collection) print("File galaxy.yml updated.") @task(optional=['collection'], help=dict( version="Version of the collection to build, the version must follow in SemVer format.", collection="The collection name 
to which the modules are upstreamed, DEFAULT: 'f5_modules'.", update="Allows updating galaxy file when requested." )) def build(c, version, collection='f5_modules', update=True): """Creates collection builds in the ansible_collections/_build directory.""" validate_version(version) if update: update_galaxy_file(version, collection) if not os.path.exists(BUILD_DIR): os.makedirs(BUILD_DIR) coll_dest = '{0}/ansible_collections/f5networks/{1}'.format(BASE_DIR, collection) cmd = 'ansible-galaxy collection build {0} -f --output-path {1}'.format(coll_dest, BUILD_DIR) c.run(cmd) @task(optional=['ah'], help=HELP) def publish(c, filename, api_key, ah=None): """Publish collection on Galaxy.""" file = '{0}/{1}'.format(BUILD_DIR, os.path.basename(filename)) if not os.path.exists(file): print("Requested file {0} not found.".format(file)) sys.exit(1) if ah: cmd = 'ansible-galaxy collection publish {0} --api-key={1} -s https://cloud.redhat.com/ansible/automation-hub'.format(file, api_key) else: cmd = 'ansible-galaxy collection publish {0} --api-key={1}'.format(file, api_key) c.run(cmd) @task(help=dict( version="What version of collection to set in the galaxy.yml file, the version must follow in SemVer format." )) def changelog(c, version): """Build changelog and update galaxy.yml file with next version number.""" collection = '{0}/ansible_collections/f5networks/f5_modules'.format(BASE_DIR) validate_version(version) with c.cd(collection): print('Linting changelog fragments.') cmd = "antsibull-changelog lint -v" result = c.run(cmd, warn=True) if result.failed: sys.exit(1) print('Generating changelog.') cmd = 'antsibull-changelog release' result = c.run(cmd, warn=True) if result.failed: sys.exit(1) print('Updating galaxy.yaml file.') update_galaxy_file(version, 'f5_modules') print('Changelog release complete.')
F5Networks/f5-ansible-modules
tasks/collection.py
Python
mit
4,203
[ "Galaxy" ]
5acca6d995b46eb81e525985a6db8acf5720540f63ed7500fe7e1532674250b5
"""generates atoms object from .smc file and views in ase gui usage: python viewCluster.py smcfile where smcfile is the full path to .smc file """ import sys from montecarlo import * from ase.cluster.cubic import FaceCenteredCubic import ase from data import fcc from atommontecarlodata import AtomMonteCarloData from ase.visualize import view if len(sys.argv) < 2: print >>sys.stderr, __doc__ sys.exit(1) #Store path to file pp = str(sys.argv[1]) nconf= int(sys.argv[2]) #Instantiate d as SurfaceM.. object d = SurfaceMonteCarloData() #Read data from file to d d.read(pp) surfaces = fcc.surface_names #Construct atoms atoms = FaceCenteredCubic('Au', surfaces, d[nconf][1], latticeconstant=4.055) view(atoms) #View atoms #sys.exit("Script Terminated with code 0")
auag92/n2dm
Asap-3.8.4/Projects/NanoparticleMC/misc/viewCluster.py
Python
mit
771
[ "ASE" ]
41267011b736e66d1d28bea4e1333bd2d6b9e24b8e3c92c00695c33db4b656f7
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import os import itertools import unittest import pandas as pd import numpy as np import numpy.testing as npt from skbio import OrdinationResults from skbio.util import (get_data_path, assert_ordination_results_equal, assert_data_frame_almost_equal) from skbio.util._testing import _normalize_signs class TestGetDataPath(unittest.TestCase): def test_get_data_path(self): fn = 'parrot' path = os.path.dirname(os.path.abspath(__file__)) data_path = os.path.join(path, 'data', fn) data_path_2 = get_data_path(fn) self.assertEqual(data_path_2, data_path) class TestAssertOrdinationResultsEqual(unittest.TestCase): def test_assert_ordination_results_equal(self): minimal1 = OrdinationResults('foo', 'bar', pd.Series([1.0, 2.0]), pd.DataFrame([[1, 2, 3], [4, 5, 6]])) # a minimal set of results should be equal to itself assert_ordination_results_equal(minimal1, minimal1) # type mismatch with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, 'foo') # numeric values should be checked that they're almost equal almost_minimal1 = OrdinationResults( 'foo', 'bar', pd.Series([1.0000001, 1.9999999]), pd.DataFrame([[1, 2, 3], [4, 5, 6]])) assert_ordination_results_equal(minimal1, almost_minimal1) # test each of the optional numeric attributes for attr in ('features', 'samples', 'biplot_scores', 'sample_constraints'): # missing optional numeric attribute in one, present in the other setattr(almost_minimal1, attr, pd.DataFrame([[1, 2], [3, 4]])) with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, almost_minimal1) 
setattr(almost_minimal1, attr, None) # optional numeric attributes present in both, but not almost equal setattr(minimal1, attr, pd.DataFrame([[1, 2], [3, 4]])) setattr(almost_minimal1, attr, pd.DataFrame([[1, 2], [3.00002, 4]])) with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, almost_minimal1) setattr(minimal1, attr, None) setattr(almost_minimal1, attr, None) # optional numeric attributes present in both, and almost equal setattr(minimal1, attr, pd.DataFrame([[1.0, 2.0], [3.0, 4.0]])) setattr(almost_minimal1, attr, pd.DataFrame([[1.0, 2.0], [3.00000002, 4]])) assert_ordination_results_equal(minimal1, almost_minimal1) setattr(minimal1, attr, None) setattr(almost_minimal1, attr, None) # missing optional numeric attribute in one, present in the other almost_minimal1.proportion_explained = pd.Series([1, 2, 3]) with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, almost_minimal1) almost_minimal1.proportion_explained = None # optional numeric attributes present in both, but not almost equal minimal1.proportion_explained = pd.Series([1, 2, 3]) almost_minimal1.proportion_explained = pd.Series([1, 2, 3.00002]) with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, almost_minimal1) almost_minimal1.proportion_explained = None almost_minimal1.proportion_explained = None # optional numeric attributes present in both, and almost equal minimal1.proportion_explained = pd.Series([1, 2, 3]) almost_minimal1.proportion_explained = pd.Series([1, 2, 3.00000002]) assert_ordination_results_equal(minimal1, almost_minimal1) almost_minimal1.proportion_explained = None almost_minimal1.proportion_explained = None class TestNormalizeSigns(unittest.TestCase): def test_shapes_and_nonarray_input(self): with self.assertRaises(ValueError): _normalize_signs([[1, 2], [3, 5]], [[1, 2]]) def test_works_when_different(self): """Taking abs value of everything would lead to false positives.""" a = np.array([[1, -1], 
[2, 2]]) b = np.array([[-1, -1], [2, 2]]) with self.assertRaises(AssertionError): npt.assert_equal(*_normalize_signs(a, b)) def test_easy_different(self): a = np.array([[1, 2], [3, -1]]) b = np.array([[-1, 2], [-3, -1]]) npt.assert_equal(*_normalize_signs(a, b)) def test_easy_already_equal(self): a = np.array([[1, -2], [3, 1]]) b = a.copy() npt.assert_equal(*_normalize_signs(a, b)) def test_zeros(self): a = np.array([[0, 3], [0, -1]]) b = np.array([[0, -3], [0, 1]]) npt.assert_equal(*_normalize_signs(a, b)) def test_hard(self): a = np.array([[0, 1], [1, 2]]) b = np.array([[0, 1], [-1, 2]]) npt.assert_equal(*_normalize_signs(a, b)) def test_harder(self): """We don't want a value that might be negative due to floating point inaccuracies to make a call to allclose in the result to be off.""" a = np.array([[-1e-15, 1], [5, 2]]) b = np.array([[1e-15, 1], [5, 2]]) # Clearly a and b would refer to the same "column # eigenvectors" but a slopppy implementation of # _normalize_signs could change the sign of column 0 and make a # comparison fail npt.assert_almost_equal(*_normalize_signs(a, b)) def test_column_zeros(self): a = np.array([[0, 1], [0, 2]]) b = np.array([[0, -1], [0, -2]]) npt.assert_equal(*_normalize_signs(a, b)) def test_column_almost_zero(self): a = np.array([[1e-15, 3], [-2e-14, -6]]) b = np.array([[0, 3], [-1e-15, -6]]) npt.assert_almost_equal(*_normalize_signs(a, b)) class TestAssertDataFrameAlmostEqual(unittest.TestCase): def setUp(self): self.df = pd.DataFrame( {'foo': [42, 42.0, np.nan, 0], 'bar': ['a', 'b', 'cd', 'e']}) def test_not_equal(self): unequal_dfs = [ self.df, # floating point error too large to be "almost equal" pd.DataFrame({'foo': [42, 42.001, np.nan, 0], 'bar': ['a', 'b', 'cd', 'e']}), # extra NaN pd.DataFrame({'foo': [42, np.nan, np.nan, 0], 'bar': ['a', 'b', 'cd', 'e']}), # different column order pd.DataFrame(self.df, columns=['foo', 'bar']), # different index order pd.DataFrame(self.df, index=np.arange(4)[::-1]), # different index type 
pd.DataFrame(self.df, index=np.arange(4).astype(float)), # various forms of "empty" DataFrames that are not equivalent pd.DataFrame(), pd.DataFrame(index=np.arange(10)), pd.DataFrame(columns=np.arange(10)), pd.DataFrame(index=np.arange(10), columns=np.arange(10)), pd.DataFrame(index=np.arange(9)), pd.DataFrame(columns=np.arange(9)), pd.DataFrame(index=np.arange(9), columns=np.arange(9)) ] # each df should compare equal to itself for df in unequal_dfs: assert_data_frame_almost_equal(df, df) # every pair of dfs should not compare equal. use permutations instead # of combinations to test that comparing df1 to df2 and df2 to df1 are # both not equal for df1, df2 in itertools.permutations(unequal_dfs, 2): with self.assertRaises(AssertionError): assert_data_frame_almost_equal(df1, df2) def test_equal(self): equal_dfs = [ self.df, # floating point error small enough to be "almost equal" pd.DataFrame({'foo': [42, 42.00001, np.nan, 0], 'bar': ['a', 'b', 'cd', 'e']}) ] for df in equal_dfs: assert_data_frame_almost_equal(df, df) for df1, df2 in itertools.permutations(equal_dfs, 2): assert_data_frame_almost_equal(df1, df2) if __name__ == '__main__': unittest.main()
jairideout/scikit-bio
skbio/util/tests/test_testing.py
Python
bsd-3-clause
9,114
[ "scikit-bio" ]
56c63b1d06c6fd3757931ffd594eb8870628bc8db4da40b59626c92fe2d2c85a
"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data. Written by John Travers <jtravs@gmail.com>, February 2007 Based closely on Matlab code by Alex Chirokov Additional, large, improvements by Robert Hetland Some additional alterations by Travis Oliphant Permission to use, modify, and distribute this software is given under the terms of the SciPy (BSD style) license. See LICENSE.txt that came with this distribution for specifics. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu> Copyright (c) 2007, John Travers <jtravs@gmail.com> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Robert Hetland nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from __future__ import division, print_function, absolute_import import sys import numpy as np from scipy import linalg from scipy._lib.six import callable, get_method_function, get_function_code from scipy.special import xlogy __all__ = ['Rbf'] class Rbf(object): """ Rbf(*args) A class for radial basis function approximation/interpolation of n-dimensional scattered data. Parameters ---------- *args : arrays x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes and d is the array of values at the nodes function : str or callable, optional The radial basis function, based on the radius, r, given by the norm (default is Euclidean distance); the default is 'multiquadric':: 'multiquadric': sqrt((r/self.epsilon)**2 + 1) 'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1) 'gaussian': exp(-(r/self.epsilon)**2) 'linear': r 'cubic': r**3 'quintic': r**5 'thin_plate': r**2 * log(r) If callable, then it must take 2 arguments (self, r). The epsilon parameter will be available as self.epsilon. Other keyword arguments passed in will be available as well. epsilon : float, optional Adjustable constant for gaussian or multiquadrics functions - defaults to approximate average distance between nodes (which is a good start). smooth : float, optional Values greater than zero increase the smoothness of the approximation. 0 is for interpolation (default), the function will always go through the nodal points in this case. 
norm : callable, optional A function that returns the 'distance' between two points, with inputs as arrays of positions (x, y, z, ...), and an output as an array of distance. E.g, the default:: def euclidean_norm(x1, x2): return sqrt( ((x1 - x2)**2).sum(axis=0) ) which is called with x1=x1[ndims,newaxis,:] and x2=x2[ndims,:,newaxis] such that the result is a matrix of the distances from each point in x1 to each point in x2. Examples -------- >>> from scipy.interpolate import Rbf >>> x, y, z, d = np.random.rand(4, 50) >>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance >>> xi = yi = zi = np.linspace(0, 1, 20) >>> di = rbfi(xi, yi, zi) # interpolated values >>> di.shape (20,) """ def _euclidean_norm(self, x1, x2): return np.sqrt(((x1 - x2)**2).sum(axis=0)) def _h_multiquadric(self, r): return np.sqrt((1.0/self.epsilon*r)**2 + 1) def _h_inverse_multiquadric(self, r): return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1) def _h_gaussian(self, r): return np.exp(-(1.0/self.epsilon*r)**2) def _h_linear(self, r): return r def _h_cubic(self, r): return r**3 def _h_quintic(self, r): return r**5 def _h_thin_plate(self, r): return xlogy(r**2, r) # Setup self._function and do smoke test on initial r def _init_function(self, r): if isinstance(self.function, str): self.function = self.function.lower() _mapped = {'inverse': 'inverse_multiquadric', 'inverse multiquadric': 'inverse_multiquadric', 'thin-plate': 'thin_plate'} if self.function in _mapped: self.function = _mapped[self.function] func_name = "_h_" + self.function if hasattr(self, func_name): self._function = getattr(self, func_name) else: functionlist = [x[3:] for x in dir(self) if x.startswith('_h_')] raise ValueError("function must be a callable or one of " + ", ".join(functionlist)) self._function = getattr(self, "_h_"+self.function) elif callable(self.function): allow_one = False if hasattr(self.function, 'func_code') or \ hasattr(self.function, '__code__'): val = self.function allow_one = True elif 
hasattr(self.function, "im_func"): val = get_method_function(self.function) elif hasattr(self.function, "__call__"): val = get_method_function(self.function.__call__) else: raise ValueError("Cannot determine number of arguments to function") argcount = get_function_code(val).co_argcount if allow_one and argcount == 1: self._function = self.function elif argcount == 2: if sys.version_info[0] >= 3: self._function = self.function.__get__(self, Rbf) else: import new self._function = new.instancemethod(self.function, self, Rbf) else: raise ValueError("Function argument must take 1 or 2 arguments.") a0 = self._function(r) if a0.shape != r.shape: raise ValueError("Callable must take array and return array of the same shape") return a0 def __init__(self, *args, **kwargs): self.xi = np.asarray([np.asarray(a, dtype=np.float_).flatten() for a in args[:-1]]) self.N = self.xi.shape[-1] self.di = np.asarray(args[-1]).flatten() if not all([x.size == self.di.size for x in self.xi]): raise ValueError("All arrays must be equal length.") self.norm = kwargs.pop('norm', self._euclidean_norm) r = self._call_norm(self.xi, self.xi) self.epsilon = kwargs.pop('epsilon', None) if self.epsilon is None: # default epsilon is the "the average distance between nodes" dim = self.xi.shape[0] ximax = np.amax(self.xi, axis=1) ximin = np.amin(self.xi, axis=1) self.epsilon = np.power(np.prod(ximax - ximin)/self.N, 1.0/dim) self.smooth = kwargs.pop('smooth', 0.0) self.function = kwargs.pop('function', 'multiquadric') # attach anything left in kwargs to self # for use by any user-callable function or # to save on the object returned. 
for item, value in kwargs.items(): setattr(self, item, value) self.A = self._init_function(r) - np.eye(self.N)*self.smooth self.nodes = linalg.solve(self.A, self.di) def _call_norm(self, x1, x2): if len(x1.shape) == 1: x1 = x1[np.newaxis, :] if len(x2.shape) == 1: x2 = x2[np.newaxis, :] x1 = x1[..., :, np.newaxis] x2 = x2[..., np.newaxis, :] return self.norm(x1, x2) def __call__(self, *args): args = [np.asarray(x) for x in args] if not all([x.shape == y.shape for x in args for y in args]): raise ValueError("Array lengths must be equal") shp = args[0].shape xa = np.asarray([a.flatten() for a in args], dtype=np.float_) r = self._call_norm(xa, self.xi) return np.dot(self._function(r), self.nodes).reshape(shp)
ales-erjavec/scipy
scipy/interpolate/rbf.py
Python
bsd-3-clause
9,416
[ "Gaussian" ]
7649b5b5d5a58b0ffb205774c8b2988bc92263f4b0af152b5eb1943a2118dcc8
# -*- coding: utf-8 -*- { '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" je opcioni izraz kao sto je "polje1=\'nova vrijednost\'". Ne mozete azurirati ili izbristi rezultati JOIN-a', '# of Houses Damaged': 'Broj oštećenih kuća', '# of Houses Destroyed': 'Broj uništenih kuća', '# of International Staff': 'Broj međunarodnog osoblja', '# of National Staff': 'Broj nacionalnog osoblja', '# of People Deceased': 'Broj prenminulih ljudi', '# of People Injured': 'Broj povrijeđenih osoba', '# of Vehicles': 'Broj vozila', '# Results per query': 'Broj rezultata po upitu', '# selected': '# odabrano', '%(app)s not installed. Ask the Server Administrator to install on Server.': '%(app)s nije instaliran. Pitajte administratora servera da vam to instalira na serveru.', '%(count)s Recipients': '%(count)s primalaca', '%(count)s Roles of the user removed': '%(count)s uloga korisnika obrisano', '%(count)s Users removed from Role': '%(count)s korisnika izbačeno iz uloge', '%(count_of)d translations have been imported to the %(language)s language file': '%(count_of)d prijevoda je uvezeno u %(language)s jezičku datoteku', '%(GRN)s Number': '%(GRN)s broj', '%(GRN)s Status': '%(GRN)s Status', '%(item)s requested from %(site)s': '%(item)s zahtijevano sa %(site)s', '%(label)s contains %(values)s': '%(label)s sadrži %(values)s', '%(label)s contains any of %(values)s': '%(label)s sadrži jedno od %(values)s', '%(label)s does not contain %(values)s': '%(label)s ne sadrži %(values)s', '%(label)s is %(values)s': '%(label)s je %(values)s', '%(label)s like %(values)s': '%(label)s želi %(values)s', '%(label)s not like %(values)s': '%(label)s ne želiu %(values)s', '%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nAko je tip zahtjeva "%(type)s", molim unesite %(type)s na slijedećem ekranu.', '%(msg)s\r\nIf the request type is "%(type)s", please enter the %(type)s on the 
next screen.': '%(msg)s\r\nAko je tip zahtjeva "%(type)s", molim unesite %(type)s na slijedećem ekranu.', '%(pe)s in %(location)s': '%(pe)s u %(location)s', '%(PO)s Number': '%(PO)s broj', '%(proj4js)s definition': '%(proj4js)s definicija', '%(quantity)s in stock': '%(quantity)s na zalihi', '%(REQ)s Number': '%(REQ)s broj', '%(resource)s Filter': '%(resource)s Filter', '%(site)s (Recipient)': '%(site)s (primaoc)', '%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!': '%(site)s nema stavki koje odgovaraju ovom zahtjevu. Možda ima drugih stavki koje mogu ispuniti ovaj zahtjev!', '%(site_label)s Status': '%(site_label)s Status', '%(site_label)s Status added': '%(site_label)s Status dodan', '%(site_label)s Status deleted': '%(site_label)s Status obrisan', '%(site_label)s Status updated': '%(site_label)s Status ažuriran', '%(system_name)s - New User Registered': '%(system_name)s - Novi korisnik registrovan', '%(system_name)s - New User Registration Approval Pending': '%(system_name)s - Zahtjev za registracijom novog korisnika', '%(system_name)s - Verify Email': '%(system_name) - Potvrdite vaš Email', '%(system_name)s has sent an email to %(email)s to verify your email address.nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': '%(system_name)s je poslao poštu za %(email)s da provjeru vašu adresu elektronske pošte.Molim provjerite vašu elektronsku poštu da ovjerite ovu adresu. 
Ako ne primite poruku elektronske pošte, provjerite vaše spam filtere ili poruke u smeću.', '%m-%d-%Y': '%d-%m-%Y', '%m-%d-%Y %H:%M:%S': '%m-%d-%Y %H:%M:%S', '%s linked to %s': '%s vezan za %s', '%s or %s': '%s ili %s', '%s rows deleted': '%s redova uklonjeno', '%s rows updated': '%s redova ažurirano', '%s selected': '%s izabrano', '%Y-%m-%d': '%d-%m-%Y', '%Y-%m-%d %H:%M': '%d-%m-%Y %H:%M', '%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S', '%Y-%m-%d %H:%M:00': '%d.%m.%Y. %H:%M:00', '& then click on the map below to adjust the Lat/Lon fields': 'i pritisnete na mapu ispod za podešavanje geografske Dužine/Širine', "'%s %%{row} deleted',nrows": "'%s %%{row} obrisan',nrows", "'%s %%{row} updated',nrows": "'%s %%{row} ažurirano',nrows", "'Cancel' will indicate an asset log entry did not occur": "'Otkaži' će indicirati na to se unos sredstva u zapisnik nije desio.", '(filtered from _MAX_ total entries)': '(filtrirano iz _MAX_ elemenata)', '* Required Fields': '* Obavezna polja', '...or add a new bin': '...ili dodaj novu korpu', '0-15 minutes': '0-15 minuta', '1 Assessment': '1 Procjena', '1 location, shorter time, can contain multiple Tasks': '1 lokacija, kraće vrijeme, može sadržavati više Zadataka', '1-3 days': '1-3 dana', '1. Fill the necessary fields in BLOCK CAPITAL letters.': '1. Popunite potrebna polja VELIKIM SLOVIMA.', '15-30 minutes': '15-30 minuta', '2 different options are provided here currently:': '2 različite opcije su pružene trenutno:', '2. Always use one box per letter and leave one box space to separate words.': '2. Uvijek koristite jednu kućicu po slovu i koristite praznu kućicu da odvajate riječi', '2x4 Car': '2x4 auto', '3. Fill in the circles completely.': '3. 
Kružiće potpuno popunite', '30-60 minutes': '30 do 60 minuta', '3W Report': '3W izvještaj', '4-7 days': '4-7 dana', '4x4 Car': '4x4 auto', '8-14 days': '8-14 dana', '_NUM_ duplicates found': '_NUM_ duplikata nađeno', 'A block of rich text which could be embedded into a page, viewed as a complete page or viewed as a list of news items.': 'Blok bogatog teksta koji se može ugraditi u stranicu, vidljiv kao potpuna strana ili vidljiv kao lista novih stavki.', 'A brief description of the group (optional)': 'Kratki opis grupe (proizvoljno)', 'A catalog of different Assessment Templates including summary information': 'Katalog raznih predložaka procjena uključujući sumarne informacije', 'A collection of Feature Classes which can be displayed together on a map or exported together.': 'Skup klasa karakteristika koje se mogu prikazati na mapi ili izvesti zajedno', 'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Dokument skinut iz GPS-a koji sadrži geografske lokacije u XML formatu', 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Datoteka u GPX formatu uzeta iz GPS-a čije vremenske oznake mogu biti povezane sa vremenskim oznakama na slikama da bi ih locirali na mapi', 'A file in GPX format taken from a GPS.': 'Datoteka u GPX formatu uzeta s GPS.', 'A library of digital resources, such as photos, documents and reports': 'Biblioteka digitalnih izvora, kao što su fotografije, dokumenti i izvješća', 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Grupa lokacija se može iskoristiti da se definiše obim pogođene oblasti, ako ne spada unutar jedne administrativne regije.', 'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Lokacijska grupa je skup lokacija (često set administrativnih regija koje predstavljaju 
kombinovano područje).', 'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'Grupa lokacija je skup lokacija (često, skup administrativnih regija koji predstavlja kombinovano područje). Članske lokacije se dodaje grupi lokacija ovdje. Grupe lokacija se mogu koristiti za filtriranje onoga što je prikazano na karti i na rezultate pretraživanja samo po mjestima unutar grupe lokacija. Grupe lokacija se mogu koristiti za definiranje područja na ugroženom području, ako one ne spadaju u jedan administrativni region. Grupe lokacija mogu se koristiti u meniju regiona.', 'A location group must have at least one member.': 'Grupa lokacije mora imati bar jednog člana', "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Lokacija koja određuje geografsko područje ove regije. 
Ovo može biti mjesto iz lokacijske hijerarhije , ili grupna lokacija , ili lokacija na granici područja', 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Oznaka je dodijeljena pojedinačnoj lokaciji u slučaju da postoji potreba za zamjenu oznake dodijeljene Klasi karakteristika.', 'A place within a Site like a Shelf, room, bin number etc.': 'Tačka na mjestu, poput police, sobe, broja korpe itd.', 'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'Projektni miljokaz predstavlja značajan datum u kalendaru koji pokazuje da je napredak prema glavnom cilju postignut.', 'A Reference Document such as a file, URL or contact person to verify this data.': 'Prateći dokument u vidu datoteke, URL-a ili kontakt osobe za potvrdu ovih podataka.', 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Referentni dokument, poput datoteke, URL-a ili kontakt osobe da se verificiraju ovi podaci. 
Možete ukucati prvih nekoliko karaktera naziva dokumenta da bi se povezalo s postojećim dokumentom.', 'A strict location hierarchy cannot have gaps.': 'Stroga hijerarhija lokacija ne može imati rupa', 'A task is a piece of work that an individual or team can do in 1-2 days': 'Zadatak je dio posla koji se samostalno ili u timu može završiti za 1-2 dana.', 'A task is a piece of work that an individual or team can do in 1-2 days.': 'Zadatak je dio posla koji se samostalno ili u timu može završiti za 1-2 dana.', 'A Warehouse is a physical place to store items.': 'Skladište je fizičko mjesto za smještanje predmeta.', 'Abbreviation': 'Skraćenica', 'Ability to customize the list of details tracked at a Shelter': 'Sposobnost da se prilagodi lista detalja praćenih u skloništu', 'Ability to customize the list of human resource tracked at a Shelter': 'Sposobnost prilagođavanja liste ljudskih resursa praćene u skloništu', 'Ability to customize the list of important facilities needed at a Shelter': 'Sposobnost da se prilagodi lista važnih objekata potrebnih u skloništu', 'Ability to Fill Out Surveys': 'Mogućnost ispunjavanja ankete', 'Ability to view Results of Completed and/or partially filled out Surveys': 'Mogućnost pregleda rezultata završenih i/ili djelimično popunjenih anketa', 'Abkhazia': 'Abhazija', 'Able to Respond?': 'U mogućnosti odgovoriti?', 'About': 'O programu', 'ABOUT': 'O', 'About Sahana': 'O Sahana', 'About Sahana Eden': 'O Sahana Eden', 'ABOUT THIS MODULE': 'O OVOM MODULU', 'About this module': 'O ovom modulu', 'Above %s': 'Iznad %s', 'Academic': 'Akademska', 'Accept Push': 'Prihvati guranje', 'Accept unsolicited data transmissions from the repository.': 'Prihvati neplanirane prenose podataka iz repozitorija.', 'ACCESS DATA': 'PRISTUPNI PODACI', 'Access denied': 'Zabranjen pristup', 'Access to education services': 'Pristup obrazovnim uslugama', 'Access to Shelter': 'Pristup skloništu', 'Accessibility of Affected Location': 'Pristupnost pogođenih lokacija', 
'Accompanying Relative': 'Član rodbine koji je pratnja', 'Account added': 'Nalog dodana', 'Account Registered - Please Check Your Email': 'Korisnički račun registrovan - molimo provjerite svoj Email', 'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Korisnički nalog registrovan, ali prijava još čeka odobrenje od ovlaštene osobe - molimo pričekajte dok se prijava ne odobri.', 'Accuracy': 'Preciznost', 'Acronym': 'Akronim', "Acronym of the organization's name, eg. IFRC.": 'Akronim od naziva organizacije, npr IFRC.', 'act': 'akt', 'Action': 'Akcija', 'ACTION REQUIRED': 'AKCIJA POTREBNA', 'Actionable': 'Djelatno', 'Actionable by all targeted recipients': 'Ima razloga da se djeluje prema svim ciljanim primateljima', 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Moguće pokrenuti samo od strane imenovanih učesnika vježbe; identifikator vježbe treba da se pojavi u polju <note>', 'Actioned?': 'Riješeno?', 'Actioning officer': 'Zaduženi službenik', 'Actions': 'Akcije', 'Actions taken as a result of this request.': 'Akcije preduzete kao rezultat ovog zahtjeva.', 'Activate': 'Aktiviraj', 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Aktiviraj događaje iz šablona scenarija za alokaciju odgovarajućih resursa (ljudi, sredstva i objekti).', 'activate to sort column ascending': 'aktivno za sortiranje kolone u rastućem redoslijedu', 'activate to sort column descending': 'aktivno za sortiranje kolone u opadajućem redoslijedu', 'active': 'aktivno', 'Active': 'Aktivno', 'Active Problems': 'Aktivni problemi', 'Active?': 'Aktivan?', 'Activities': 'Aktivnosti', 'Activities matching Assessments': 'Aktivnosti koje odgovaraju procjenama', 'Activities matching Assessments:': 'Aktivnosti koje odgovaraju procjenama:', 'Activities of boys 13-17yrs before disaster': 'Aktivnosti dječaka dobi 13-17 godina prije 
katastrofe', 'Activities of boys 13-17yrs now': 'Aktivnosti dječaka između 13 i 17 godina', 'Activities of boys <12yrs before disaster': 'Aktivnosti dječaka mlađih od 12 godina prije nepogode', 'Activities of boys <12yrs now': 'Trenutne aktivnosti dječaka <12 godina', 'Activities of children': 'Aktivnosti djece', 'Activities of girls 13-17yrs before disaster': 'Aktivnosti djevojaka 13-17 godina prije katastrofe', 'Activities of girls 13-17yrs now': 'Trenutne aktivnosti djevojčica između 13 i 17 godina', 'Activities of girls <12yrs before disaster': 'Aktivnosti djevojčica mlađih od 12 godina prije katastrofe', 'Activities of girls <12yrs now': 'Aktivnosti djevojčica <12god sada', 'Activities:': 'Aktivnosti:', 'Activity': 'Aktivnost', 'Activity Added': 'Dodana aktivnost', 'Activity added': 'Dodana aktivnost', 'Activity Deleted': 'Obrisana aktivnost', 'Activity Details': 'Detalji aktivnosti', 'Activity Organization': 'Organizacija aktivnosti', 'Activity Organization Added': 'Dodana organizacija aktivnosti', 'Activity Organization Deleted': 'Organizacija aktivnosti obrisana', 'Activity Organization Updated': 'Organizacija aktivnosti ažurirana', 'Activity Organizations': 'Organizacije aktivnosti', 'Activity removed': 'Aktivnost uklonjena', 'Activity Report': 'Izvještaj o aktivnosti', 'Activity Reports': 'Izvještaji aktivnosti', 'Activity Type': 'Tip aktivnosti', 'Activity Type Added': 'Dodan tip aktivnosti', 'Activity Type added to Activity': 'Tip aktivnosti dodan u aktivnost', 'Activity Type added to Project Location': 'Vrsta aktivnosti dodana na lokaciju projekta', 'Activity Type Deleted': 'Izbrisan tip aktivnosti', 'Activity Type removed from Activity': 'Tip aktivnosti uklonjen iz aktivnosti', 'Activity Type removed from Project Location': 'Vrsta aktivnosti uklonjena iz lokacije projekta', 'Activity Type Updated': 'Ažuriran tip aktivnosti', 'Activity Types': 'Tipovi aktivnosti', 'Activity Updated': 'Djelatnost ažurirana', 'Activity updated': 'Aktivnost ažurirana', 
'Add': 'Dodati', 'Add %(site_label)s Status': 'Dodaj %(site_label)s status', 'Add a new certificate to the catalog.': 'Dodaj novi certifikat u katalog', 'Add a new competency rating to the catalog.': 'Dodaj novu ocjenu sposobnosti u katalog.', 'Add a new course to the catalog.': 'Dodaj novi kurs u katalog', 'Add a New Inventory Location': 'Dodaj novu lokaciju skladišta', 'Add a new job role to the catalog.': 'Dodaj novu poziciju u katalog', 'Add a new program to the catalog.': 'Dodaj novi program u katalog', 'Add a New Relief Item': 'Dodaj novu stavku pomoći', 'Add a new Site from where the Item is being sent.': 'Navedite mjesto gdje se šalje ova stavka.', 'Add a new skill provision to the catalog.': 'Dodaj novu zalihu vještina u katalog.', 'Add a new skill to the catalog.': 'Dodaj novu vještinu u katalog', 'Add a new skill type to the catalog.': 'Dodaj novi tip vještine u katalog', 'Add a new vehicle category': 'Dodaj novu kategoriju vozila', 'Add a new vehicle type': 'Dodaj novi tip vozila.', 'Add a Person': 'Dodaj osobu', 'Add a Reference Document such as a file, URL or contact person to verify this data.': 'Dodaj prateći dokument u vidu datoteke, URL-a ili kontakt osobe za potvrdu ovih podataka.', 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Dodajte referencu, kao što je dokument, URL ili kontakt osobu da potvrdi ove podatke. 
Ako ne stavite referencu, prikazat će se vaš mail.', 'Add a Volunteer': 'Dodaj volontera', 'Add Activity': 'Dodaj aktivnost', 'Add Activity Report': 'Dodaj izvještaj o radu', 'Add Activity Type': 'Dodaj tip aktivnosti', 'Add Address': 'Dodaj Adresu', 'Add Affiliation': 'Dodaj namještenje', 'Add Aid Request': 'Dodaj zahtjev za pomoć', 'Add all organizations which are involved in different roles in this project': 'Dodaj sve organizacije koje su uključene u različite uloge u ovom projektu', 'Add Alternative Item': 'Dodaj alternativnu stavku', 'Add an image, such as a Photo.': 'Dodaj sliku,kao sto je fotografija', 'Add an Photo.': 'Dodaj fotografiju.', 'Add Annual Budget': 'Dodaj godišnji budžet', 'Add Appraisal': 'Dodaj ispunjenje', 'Add Assessment': 'Dodaj procjenu', 'Add Assessment Summary': 'Dodaj sažetak o procjeni', 'Add Asset': 'Dodaj sredstvo', 'Add Asset Log Entry - Change Label': 'Dodaj stavku zapisnika sredstava - promijeni oznaku', 'Add Availability': 'Dodaj dostupnost', 'Add Award': 'Dodaj nagradu', 'Add Baseline': 'Dodaj referentnu tačku', 'Add Baseline Type': 'Dodaj tip referentne tačke', 'Add Bed Type': 'Dodaj vrstu ležaja', 'Add Beneficiaries': 'Dodaj korisnike', 'Add Bin Type': 'Dodaj tip korpe', 'Add Bins': 'Dodaj korpe', 'Add Bookmark': 'Dodaj zabilješku', 'Add Branch Organization': 'Dodaj pripadnu organizaciju', 'Add Brand': 'Dodaj marku proizvoda', 'Add Budget': 'Dodaj budžet', 'Add Bundle': 'Dodaj paket', 'Add Camp': 'Dodaj kamp', 'Add Camp Service': 'Dodaj uslugu kampa', 'Add Camp Status': 'Dodaj status kampa', 'Add Camp Type': 'Dodaj tip kampa', 'Add Campaign Message': 'Dodaj poruku kampanje', 'Add Catalog': 'Dodaj Katalog', 'Add Catalog Item': 'Dodaj katalog stavku', 'Add Catalog.': 'Dodaj katalog.', 'Add Category': 'Dodaj kategoriju', 'Add Category<>Sub-Category<>Catalog Relation': 'Dodaj Kategorija<>Podkategorija<>kataloški odnos', 'Add Certificate': 'Dodaj certifikat', 'Add Certificate for Course': 'Dodaj certifikat za kurs', 'Add 
Certification': 'Dodja certifikat', 'Add Cholera Treatment Capability Information': 'Dodajte informacije o sposobnosti liječenja kolere', 'Add Cluster': 'Dodaj skup', 'Add Cluster Subsector': 'Dodaj podsektor skupa', 'Add Competency': 'Dodaj stručnost', 'Add Competency Rating': 'Dodaj ocjenu kompetentnosti', 'Add Config': 'Dodaj konfiguraciju.', 'Add Contact': 'Dodaj kontakt', 'Add Contact Information': 'Dodajte kontakt informacije', 'Add Course': 'Dodaj kurs', 'Add Course Certicate': 'Dodaj certifikat kursa', 'Add Credential': 'Dodaj akreditiv', 'Add Credentials': 'Dodaj akreditive', 'Add Data to Theme Layer': 'Dodaj podatke tematskom sloju', 'Add Dead Body Report': 'Dodaj izvještaj o preminulim osobama', 'Add Disaster Victims': 'Dodaj žrtve nepogode', 'Add Distribution': 'Dodaj distribuciju', 'Add Distribution Item': 'Dodaj stavku raspodjele', 'Add Distribution.': 'Dodaj distribuciju', 'Add Document': 'Dodaj dokument', 'Add Donation': 'Dodaj donaciju', 'Add Donor': 'Dodaj donatora', 'Add Education Detail': 'Dodaj detalje o obrazovanju', 'Add Education Level': 'Dodaj nivo obrazovanja', 'Add Email Account': 'Dodaj e-mail nalog', 'Add Facility': 'Dodaj objekat', 'Add Feature Class': 'Dodaj klasu karakteristika', 'Add Feature Group': 'Dodaj grupu karakteristika', 'Add Feature Layer': 'Dodaj sloj karakteristika', 'Add Find Report': 'Dodaj novi izvještaj o traženju', 'Add Flood Report': 'Dodaj izvještaj o poplavi', 'Add GIS Feature': 'Dodaj GIS karakteristiku', 'Add GPS data': 'Dodaj GPS podatke', 'Add Group': 'Dodaj grupu', 'Add Group Member': 'Dodaj člana grupe', 'Add Group Membership': 'Dodaj članstvo grupe', 'Add Hospital': 'Dodaj Bolnicu', 'Add Hours': 'Dodaj sate', 'Add Human Resource': 'Dodaj ljudski resurs', 'Add Identification Report': 'Dodaj izvještaj o identifikaciji', 'Add Identity': 'Dodaj identitet', 'Add Image': 'Dodaj sliku', 'Add Impact': 'Dodaj utjecaj', 'Add Impact Type': 'Dodaj tip utjecaja', 'Add Incident': 'Dodaj incident', 'Add Incident Report': 
'Dodaj izvještaj o incidentu', 'Add Incoming Email': 'Dodaj dolaznu e-pošte', 'Add Incoming SMS': 'Dodaj dolazni SMS', 'Add Inventory Item': 'Dodaj stavkuinventara', 'Add Inventory Store': 'Dodaj novi smještaj inventara', 'Add Item': 'Dodaj stavku', 'Add Item (s)': 'Dodaj stavku', 'Add Item Catalog': 'Dodaj katalog stavki', 'Add Item Catalog Category': 'Dodaj kategoriju stavke kataloga', 'Add Item Category': 'Dodaj kategoriju stavke', 'Add Item Pack': 'Dodaj paket stavki', 'Add Item Packet': 'Dodaj paket stavki', 'Add Item Sub-Category': 'Dodaj podkategoriju stavke', 'Add Item to Catalog': 'Dodaj stavku u katalog', 'Add Item to Commitment': 'Dodaj stavku zaduženja', 'Add Item to Inventory': 'Dodaj stavku u inventar', 'Add Item to Request': 'Dodati stavku na zahtjev', 'Add Item to Shipment': 'Dodaj Stavku u Pošiljku', 'Add Item to Stock': 'Dodaj stavku u zalihu', 'Add Job Role': 'Dodajte poziciju za posao', 'Add Key': 'Dodaj ključ', 'Add Keyword': 'Dodaj ključnu riječ', 'Add Kit': 'Dodaj komplet', 'Add Layer': 'Dodaj sloj', 'Add Layer to this Profile': 'Dodaj sloj u profil', 'Add Level 1 Assessment': 'Dodaj procjenu nivoa 1', 'Add Level 2 Assessment': 'Dodaj 2. 
level procjene', 'Add Line': 'Dodaj liniju', 'Add Location': 'Dodaj Lokaciju', 'Add Locations': 'Dodaj lokacije', 'Add Log Entry': 'Dodaj stavku zapisnika', 'Add main Item Category.': 'Dodaj glavnu kategoriju stavke', 'Add main Item Sub-Category.': 'Dodaj glavnu podkategoriju stavke', 'Add Map Profile': 'Dodaj podešenje mape', 'Add Marker': 'Dodaj oznaku', 'Add Member': 'Dodaj novog člana', 'Add Membership': 'Dodaj članstvo', 'Add Message': 'Dodaj poruku', 'Add Metadata': 'Dodaj metapodatke', 'Add Mission': 'Dodaj misiju', 'Add Mobile Commons Settings': 'Dodaj mobilne postavke', 'Add Need': 'Dodaj potrebu', 'Add Need Type': 'Dodaj tip potrebe', 'Add New': 'Dodaj nov', 'Add New Activity': 'Dodaj novu aktivnost', 'Add New Address': 'Dodaj novu adresu', 'Add New Aid Request': 'Dodaj novi zahtjev za pomoć', 'Add New Alternative Item': 'Dodaj novi alternativni artikl', 'Add New Assessment': 'Dodaj novu procjenu', 'Add New Assessment Summary': 'Dodaj novi rezime procjene', 'Add New Asset': 'Dodaj novo sredstvo', 'Add New Baseline': 'Dodaj novu referentnu tačku', 'Add New Baseline Type': 'Dodaj novi tip referentne tačke', 'Add New Bin': 'Dodaj novu korpu', 'Add New Bin Type': 'Dodaj novi tip korpe', 'Add New Brand': 'Dodaj novu marku', 'Add New Budget': 'Dodaj novi budžet', 'Add New Bundle': 'Dodaj novi paket', 'Add New Camp': 'Dodaj Novi Kamp', 'Add New Camp Service': 'Dodaj novu uslugu Kampa', 'Add New Camp Type': 'Dodaj novi tip kampa', 'Add New Catalog': 'dodaj novi katalog', 'Add New Catalog Item': 'Dodaj novu stavku kataloga', 'Add New Cluster': 'Dodaj novi skup', 'Add New Cluster Subsector': 'Dodajte novi podsektor skupa', 'Add New Commitment Item': 'Dodaj novo zaduženje', 'Add New Config': 'Dodaj novu konfiguraciju.', 'Add New Contact': 'Dodaj Novi Kontakt', 'Add New Credential': 'Dodaj novi akreditiv', 'Add New Distribution': 'Dodaj novu raspodjelu', 'Add New Document': 'Dodaj novi dokument', 'Add New Donor': 'Dodaj novog donatora', 'Add New Entry': 'Dodaj novi 
element', 'Add New Event': 'Dodaj novi događaj', 'Add New Facility': 'Dodaj novi objekt', 'Add New Feature Class': 'Dodaj novu klasu karakteristika', 'Add New Feature Group': 'Dodaj novu grupu karakteristika', 'Add New Feature Layer': 'Dodaj novi sloj karakteristika', 'Add New Find Report': 'Dodaj novi izvještaj o traženju', 'Add New Flood Report': 'Dodaj novi izvještaj o poplavi', 'Add New Group': 'Dodaj novu grupu', 'Add new Group': 'Dodaj novu grupu', 'Add New Group Membership': 'Dodaj novo članstvo grupe', 'Add New Home': 'Dodaj novi dom', 'Add New Hospital': 'Dodaj novu bolnicu', 'Add New Human Resource': 'Dodaj novi ljudski resurs', 'Add New Identity': 'Dodaj novi identitet', 'Add New Image': 'Dodaj novu sliku', 'Add New Impact': 'Dodaj novi utjecaj', 'Add New Impact Type': 'Dodaj novi tip utjecaja', 'Add New Incident': 'Dodaj novi incident', 'Add New Incident Report': 'Dodaj novi izvještaj o incidentu', 'Add new Individual': 'Dodaj novu osobu', 'Add New Information': 'Dodaj nove informacija', 'Add New Inventory Item': 'Dodaj novi artikl u inventar', 'Add New Inventory Store': 'Dodaj novi smještaj inventara', 'Add New Item': 'Dodajte novu stavku', 'Add New Item Catalog': 'Kreiraj novu stavku kataloga', 'Add New Item Catalog Category': 'Dodaj novu kategoriju stavke kataloga', 'Add New Item Category': 'Dodaj novu kategoriju', 'Add New Item Pack': 'Dodaj novi paket stavki', 'Add New Item Packet': 'Dodaj novi paket staviki', 'Add New Item Sub-Category': 'Dodaj novu podkategoriju stavke', 'Add New Item to Kit': 'Dodaj novu stavku u komplet', 'Add New Key': 'Dodaj novi ključ', 'Add New Kit': 'Dodaj novi komplet', 'Add New Layer': 'Dodaj novi sloj', 'Add New Level 1 Assessment': 'Dodaj Procjenu za Novi Nivo 1', 'Add New Level 2 Assessment': 'Dodaj novu procjenu nivoa 2', 'Add New Location': 'Dodaj novu lokaciju', 'Add New Log Entry': 'Dodaj novi unos zapisnika', 'Add New Map Profile': 'Dodajte novu konfiguraciju plana', 'Add New Marker': 'Dodaj novi marker', 'Add 
New Member': 'Dodajte Novog Člana', 'Add New Membership': 'Dodaj novo članstvo', 'Add New Metadata': 'Dodaj novi metapodatak ', 'Add New Need': 'Dodajte novu potrebu', 'Add New Need Type': 'Dodaj Novi Tip Potrebe', 'Add New Note': 'Kreiraj novu bilješku', 'Add New Office': 'Dodaj novi ured', 'Add New Organization': 'Dodaj novu organizaciju', 'Add New Partner': 'Dodaj novog partnera', 'Add new Patient': 'Dodaj novog pacijenta', 'Add New Patient': 'Dodaj novog pacijenta', 'Add New Peer': 'Dodaj novog suradnika', 'Add New Person': 'Dodaj novu osobu.', 'Add New Person to Commitment': 'Dodaj novu osobu u zaduženje', 'Add new person.': 'Dodaj novu osobu.', 'Add New Photo': 'Dodaj novu fotografiju', 'Add New Population Statistic': 'Dodati novu statistiku populacije', 'Add new position.': 'Dodaj novu poziciju.', 'Add New Problem': 'Dodaj novi problem', 'Add New Project': 'Dodaj novi projekt', 'Add new project.': 'Dodaj novi projekat', 'Add New Projection': 'Dodaj novu projekciju', 'Add New Rapid Assessment': 'dodaj novu brzu procjenu', 'Add New Received Item': 'Dodaj novi primljeni predmet', 'Add New Record': 'Dodaj novi zapis', 'Add New Relative': 'Dodaj novog srodnika', 'Add New Relief Item': 'Dodaj novu stavku pomoći', 'Add New Report': 'Dodaj novi izvještaj', 'Add New Request': 'Dodaj novi zahtjev', 'Add New Request Item': 'Dodaj novu stavku zahtjeva', 'Add New Resource': 'Dodaj novi resurs', 'Add New Resource Type': 'Dodaj novi tip resursa', 'Add New Response': 'Dodaj novi odgovor', 'Add New River': 'dodaj novu rijeku', 'Add New Role': 'Dodaj novu ulogu', 'Add New Role to User': 'Dodajte novu ulogu korisniku', 'Add New Room': 'Dodaj novu prostoriju', 'Add New Scenario': 'Dodaj novi scenarij', 'Add New School District': 'Dodaj novi školski rejon', 'Add New School Report': 'Dodaj novi izvještaj o školama', 'Add New Sent Item': 'Dodaj novu poslanu stavku', 'Add New Setting': 'Dodaj novu postavku', 'Add New Shelter': 'Dodaj novo sklonište', 'Add New Shelter Service': 
'Dodavanje nove usluge skloništa', 'Add New Shelter Type': 'Dodaj novi tip skloništa', 'Add New Shipment to Send': 'Dodaj novu pošiljku u slanje', 'Add New Site': 'Dodaj novo mjesto', 'Add New Skill': 'Dodaj novu vještinu', 'Add New Skill Type': 'Dodaj novi tip vještine', 'Add New Solution': 'Dodaj novo rješenje', 'Add New Source': 'Dodaj novi izvor', 'Add New Staff': 'Dodaj novo osoblje', 'Add New Staff Member': 'Dodaj novog člana osoblja', 'Add new staff role.': 'Dodati u ulogu osoblja', 'Add New Staff Type': 'Dodaj novi tip osoblja', 'Add new staff.': 'Dodaj novo osoblje.', 'Add New Storage Location': 'Dodaj novu lokaciju o smještaju', 'Add New Subsector': 'Dodaj novi podsektor', 'Add New Survey Answer': 'Dodaj novi odgovor za anketu', 'Add New Survey Question': 'Dodaj novo anketno pitanje', 'Add New Survey Section': 'Dodaj novo pitanje za anketu', 'Add New Survey Series': 'Dodaj novi niz anketa', 'Add New Survey Template': 'Dodajte novi ankentni šablon', 'Add New Task': 'Dodaj novi zadatak', 'Add New Team': 'Dodaj novi tim', 'Add New Theme': 'Dodaj novu temu', 'Add New Ticket': 'Dodaj novu karticu', 'Add New Track': 'Dodaj novo praćenje', 'Add New Unit': 'Dodaj novu jedinicu', 'Add New User': 'Dodaj novog korisnika', 'Add New User to Group': 'Doda novog korisnika u grupu', 'Add New User to Role': 'Dodaj novog korisnika ulozi', 'Add New Vehicle': 'Dodaj novo vozilo', 'Add New Vehicle Type': 'Dodaj novi tip VOZILA', 'Add New Volunteer': 'Dodaj novog volontera', 'Add New Warehouse': 'Dodajte novo skladište', 'Add New Warehouse Item': 'Dodaj novu stavku skladišta', 'Add Note': 'Dodaj bilješku', 'Add Office': 'Dodaj ured', 'Add or Update': 'Dodavanje ili ažuriranje', 'Add Order': 'Dodaj narudžbu', 'Add Organization': 'Dodaj organizaciju', 'Add Organization Domain': 'Dodaj domenu organizaciju', 'Add Organization Needs': 'Dodaj potrebe organizacije', 'Add Organization to Activity': 'Dodaj organizaciju u aktivnost', 'Add Organization to Project': 'Dodaj organizaciju 
projektu', 'Add Participant': 'Dodaj učesnika', 'Add Partner': 'Dodaj partnera', 'Add Peer': 'Dodaj saradnika', 'Add People to Commitment': 'Dodaj ljude u zaduženje', 'Add Person': 'Dodaj osobu', 'Add Person to Commitment': 'Dodaj osobu u zaduženje', "Add Person's Details": 'Dodaj detalje o osobi', 'Add Personal Effects': 'Dodaj vlastite efekte', 'Add Photo': 'Dodajte sliku', 'Add Point': 'Dodaj tačku', 'Add Polygon': 'Dodaj mnogougao', 'Add Population Statistic': 'Dodaj statistiku o stanovništvu', 'Add Position': 'Dodaj poziciju', 'Add Problem': 'Dodaj problem', 'Add Professional Experience': 'Dodaj profesionalno iskustvo', 'Add Profile Configuration for this Layer': 'Dodaj konfiguraciju profila za ovaj sloj', 'Add Project': 'Dodaj projekat', 'Add Projection': 'Dodajte projekciju', 'Add Projections': 'Dodaj projekciju', 'Add Question': 'Dodaj pitanje', 'Add Rapid Assessment': 'Dodaj Brzu Procjenu', 'Add Recipient': 'Dodaj primaoca', 'Add Recipient Site': 'Dodaj mjesto primaoca', 'Add Recipient Site.': 'Dodaj lokaciju primaoca', 'Add Record': 'Dodaj zapis', 'Add Recovery Report': 'Dodaj izvještaj o pronalaženju', 'Add Reference Document': 'Dodaj prateći/referentni dokument', 'Add Region': 'Dodaj područje', 'Add Relief Item': 'Dodaj stavku pomoći', 'Add Report': 'Dodaj izvještaj', 'Add Request': 'Dodaj zahtjev', 'Add Request Detail': 'Dodaj detalje o zahtjevu', 'Add Request Template': 'Dodaj predložak zahtjeva', 'Add Resource': 'Dodaj Resurs', 'Add Resource Type': 'Dodaj tip resursa', 'Add Response': 'Dodaj odgovor', 'Add Response Summary': 'Dodaj sumarni odgovor', 'Add River': 'Dodaj rijeku', 'Add Role': 'Dodaj ulogu', 'Add Room': 'Dodajte prostoriju', 'Add RSS Settings': 'Dodaj RSS Postavke', 'Add School District': 'Dodaj školski rejon', 'Add School Report': 'Dodaj školski izvještaj', 'Add Section': 'Dodaj sekciju', 'Add Sector': 'Dodaj sektor', 'Add Sender Site.': 'Dodaj lokaciju pošiljaoca.', 'Add Service': 'Dodaj usluga', 'Add Service Profile': 'Dodaj profil 
usluga', 'Add Setting': 'Dodaj Postavke', 'Add Shelter': 'Dodaj sklonište', 'Add Shelter Service': 'Dodaj uslugu skloništa', 'Add Shelter Type': 'Dodaj tip skloništa', 'Add Site Needs': 'Dodaj potrebe mjesta', 'Add Skill': 'Dodaj vještinu', 'Add Skill Equivalence': 'Dodaj ekvivalenciju vještine', 'Add Skill Provision': 'Dodaj pružanje vještina', 'Add Skill to Request': 'Dodati vještinu u zahtjev', 'Add Skill Type': 'Dodaj tip vještine', 'Add Skill Types': 'Dodaj tip vještine', 'Add Solution': 'Dodaj rješenje', 'Add Source': 'Dodaj izvor', 'Add Staff': 'Dodaj članove osoblja', 'Add Staff Member': 'Dodaj člana osoblja', 'Add staff members': 'Dodaj članove osoblja', 'Add Staff Type': 'Dodaj tip osoblja', 'Add Status': 'Dodaj status', 'Add Stock to Warehouse': 'Dodaj zalihu u skladište', 'Add Storage Bin Type': 'Dodaj tip korpe za smještaj', 'Add Storage Location': 'Dodaj lokaciju skladištenja', 'Add strings manually': 'Dodaj stringove ručno', 'Add strings manually through a text file': 'Dodaj nizove znakova ručno kroz tekstualnu datoteku', 'Add Sub-Category': 'Traži potkategoriju', 'Add Subscription': 'Dodaj pretplatu', 'Add Subsector': 'Dodaj podsektor', 'Add Survey Answer': 'Dodaj odgovor na istraživanje', 'Add Survey Question': 'Dodajte anketno pitanje', 'Add Survey Section': 'Dodajte anketnu sekciju', 'Add Survey Series': 'Dodajte niz anketa', 'Add Survey Template': 'Dodaj predložak anketi', 'Add Symbology to Layer': 'Dodaj značenje simbola na sloj', 'Add Task': 'Dodaj zadatak', 'Add Team': 'Dodaj tim', 'Add Team Member': 'Dodaj člana tima', 'Add the Storage Bin Type.': 'Dodaj tip korpe za smještaj', 'Add the Storage Location where this bin is located.': 'Dodaj lokaciju gdje je ova korpa.', 'Add the Storage Location where this this Bin belongs to.': 'Dodaj lokaciju gdje ova korpa pripada.', 'Add Theme': 'Dodaj temu', 'Add this entry': 'Dodaj ovaj unos', 'Add Ticket': 'Dodaj karticu', 'Add to a Team': 'Dodaj u Tim', 'Add to Bin': 'Dodaj u korpu', 'Add to budget': 
'Dodati budžetu', 'Add to Bundle': 'Dodaj u paket', 'Add to Catalog': 'Dodaj u katalog', 'Add to Feature Group': 'Dodaj u grupu karakteristika', 'Add Training': 'Dodaj trening', 'Add Translation Language': 'Dodaj jezik za prevođenje', 'Add Twilio Settings': 'Dodaj Twilio postavke', 'Add Twitter Search Query': 'Dodaj Twitter upit za pretragu', 'Add Unit': 'Dodaj Jedinicu', 'Add User': 'Dodaj korisnika', 'Add Vehicle': 'Dodaj vozilo', 'Add Vehicle Category': 'Dodaj kategoriju vozila', 'Add Vehicle Detail': 'Dodaj detalje o vozilu', 'Add Vehicle Details': 'Dodaj detalje o vozilu', 'Add Vehicle Type': 'Dodaj tip vozila', 'Add Volunteer': 'Dodajte volontera', 'Add Volunteer Availability': 'Dodaj dostupnost volontera', 'Add Volunteer Registration': 'Dodaj registraciju volontera', 'Add volunteers': 'Dodaj volontere', 'Add Warehouse': 'Dodaj skladište', 'Add Warehouse Item': 'Dodaj stavku skladišta', 'Add...': 'Dodaj...', 'Add/Edit/Remove Layers': 'Dodaj/Uredi/Obriši slojeve', 'added': 'dodano', 'Added to Group': 'Dodano u grupu', 'Added to Team': 'Dodano u tim', 'Additional Beds / 24hrs': 'Dodatni kreveti / 24 sata', 'Additional Comments': 'Dodatni komentari', 'Additional quantity quantifier – i.e. “4x5”.': 'Dodatni kvantifikator količine, tj. “4x5”.', 'Address': 'Adresa', 'Address added': 'Dodana adresa', 'Address deleted': 'Obrisana adresa', 'Address Details': 'Detalji adrese', 'Address Found': 'Pronađena adresa', 'Address Mapped': 'Adresa mapirana', 'Address NOT Found': 'Nije pronađena adresa', 'Address NOT Mapped': 'Adresa NIJE mapirana', "Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": 'Adresa slike koja će se koristiti za ovaj sloj u legendi. 
Ovo će omogućiti upotrebu kontrolisane statičke slike umjesto automatskog upita servera za ono što on pruža (što neće raditi s GeoWebCache )', 'Address Type': 'Tip adrese', 'Address updated': 'Ažurirana adresa', 'Addresses': 'Adrese', 'Adequate': 'Odgovarajuće', 'Adequate food and water available': 'Dostupna adekvatna hrana i voda', 'Adjust Item Quantity': 'Prilagodi količinu stavke', 'Adjust Items due to Theft/Loss': 'Prilagodi stavke zbog krađe/gubitka', 'Adjust Stock': 'Prilagodi zalihu', 'Adjust Stock Item': 'Prilagodi stavku zalihe', 'Adjust Stock Levels': 'Prilagodi nivo zalihe', 'Adjustment created': 'Prilagođenje kreirano', 'Adjustment deleted': 'Prilagođenje obrisano', 'Adjustment modified': 'Prilagođenje izmijenjeno', 'Admin Email': 'Email administratora', 'Admin Name': 'Ime administratora', 'Admin Tel': 'Telefon administratora', 'Administration': 'Administracija', 'Admissions/24hrs': 'Ulazi/24 sata', 'Adolescent (12-20)': 'Adolescent (12-20)', 'Adolescent participating in coping activities': 'Učestvovanje adolescenata u aktivnostima prilagođavanja', 'Adult (21-50)': 'Odrasli (21-50)', 'Adult female': 'Odrasla ženska osoba', 'Adult ICU': 'Intenzivna njega za odrasle', 'Adult male': 'Odrasli muškarac', 'Adult Psychiatric': 'Psihijatar za odrasle', 'Adults in prisons': 'Odrasli u zatvoru', 'advanced': 'napredno', 'Advanced Bin Search': 'Napredna pretraga korpi', 'Advanced Catalog Search': 'Napredna pretraga kataloga', 'Advanced Category Search': 'Napredna pretraga kategorija', 'Advanced Item Search': 'Napredna pretragastavki', 'Advanced Sub-Category Search': 'Napredna pretraga podkategorije', 'Advanced Unit Search': 'Napredna pretragajedinica', 'Advanced:': 'Napredno:', 'Advisory': 'Savjeti', 'Advocacy': 'Advokatura', 'Affected Persons': 'Osobe na koje je bio utjecaj', 'Affiliation added': 'Preduzeće dodano', 'Affiliation deleted': 'Preduzeće obrisano', 'Affiliation Details': 'Detalji preduzeća', 'Affiliation updated': 'Preduzeće ažurirano', 'Affiliations': 
'Preduzeća', 'Afghanistan': 'Afganistan', 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Nakon što kliknete na dugme, pojavit će se niz stavki u paru jedan za drugim. Molimo da odaberete jedno rješenje iz svakog para koje preferirate.', 'After clicking on the Vote button ... (#TODO [String]) Please select the one item from each pair that you prefer over the other.': 'Nakon što kliknete na dugme glasaj, (#TODO [String]). Molimo da odaberete jednu stavku iz svakog para koje preferirate.', 'Age': 'Starost', 'Age group': 'Starosna grupa', 'Age Group': 'Starosna grupa', 'Age group does not match actual age.': 'Starosna grupa ne odgovara stvarnim godinama.', 'Aggravating factors': 'Otežavajući faktori', 'Agriculture': 'Poljoprivreda', 'Aid Management': 'Upravljanje pomoći', 'Aid Request': 'Dodaj zahtjev', 'Aid Request added': 'Zahtjev za pomoć dodan', 'Aid Request Details': 'Detalji o zahtjevu za pomoć', 'Aid Request updated': 'Zahtijev za pomoć je ažuriran', 'Aid Requests': 'Dodaj zahtjeve', 'Air Transport Service': 'Usluga zračnog prijevoznog sredstva', 'Aircraft Crash': 'Pad aviona', 'Aircraft Hijacking': 'Avionska otmica', 'Aircraft Maximum Size': 'Maksimalna veličina aviona', 'Airport': 'Aerodrom', 'Airport added': 'Aerodrom dodan', 'Airport Closure': 'Zatvaranje aerodroma', 'Airport deleted': 'Aerodrom obrisan', 'Airport Details': 'Detalji aerodroma', 'Airport updated': 'Aerodrom ažuriran', 'Airports': 'Aerodromi', 'Airspace Closure': 'Zatvaranje zračnog prostora', 'Albania': 'Albanija', 'Alcohol': 'Alkohol', 'Alcoholics': 'Alkoholičari', 'Alert': 'Uzbuna', 'Alimentary Support Vehicle': 'Vozila za hitnu podršku', 'All': 'Sve', 'ALL': 'Sve', 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. 
Please consult the source field of each entry.': 'Svi podaci obezbjeđeni od strane Sahana Software fondacije sa ove stranice su licencirani pod Creative Commons Attribution licencom. Međutim, svi podaci ne potiču odavde. Molimo, pregledajte polje izvora svakog pristupa.', 'All data provided by the Sahana Software Foundation from this site is licensed under a Creative Commons Attribution license. However, not all data originates here. Please consult the source field of each entry.': 'Svi podaci obezbjeđeni od strane Sahana Software fondacije sa ove stranice su licencirani pod Creative Commons Attribution licencom. Međutim, svi podaci ne potiču odavde. Molimo, pregledajte polje izvora svakog pristupa.', 'All Entities': 'Sve jedinke', 'All Inbound & Outbound Messages are stored here': 'Sve ulazne i izlazne poruke su smještene ovdje', 'All Open Tasks': 'Svi otvoreni zadaci', 'All Pledges': 'Svi zahtjevi', 'All Records': 'Svi zapisi', 'all records': 'svi zapisi', 'All Requested Items': 'Sve zahtijevani stavke', 'All Resources': 'Svi Resursi', 'All selected': 'Sve izabrano', 'All Tasks': 'Svi zadaci', 'Allowed to push': 'Dozvoljeno gurnuti', 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'dozvoljava da se budžet uspostavi na osnovu troškova osoblja i opreme, uključujući bilo koje režijske troškove administratora.', 'Allows a Budget to be drawn up': 'Dozvoljava izradu budžeta', 'Allows authorized users to control which layers are available to the situation map.': 'Omogućava ovlaštenim korisnicima da kontrolišu koji slojevi su dostupni na karti situacije.', 'Allows authorized users to upload multiple features into the situation map.': 'Omogućava ovlaštenim korisnicima da pošalju više karakteristima na kartu situacije.', 'allows for creation and management of assessments.': 'dozvoljava kreiranje i upravljanje procjenama.', 'allows for creation and management of surveys to assess the damage following a natural disaster.': 
'dozvoljava kreiranje i upravljanje istraživanjima za procjenu nesreće uzrokovane prirodnom katastrofom', 'Already in this Feature Group!': 'Već je u ovoj grupi karakteristika', 'Alternative infant nutrition in use': 'Alternativa u prehrani djece', 'Alternative Item': 'Alternativna stavka', 'Alternative Item added': 'Alternativna stavka dodana', 'Alternative Item deleted': 'Alternativna stavka obrisana', 'Alternative Item Details': 'Detalji alternativne stavke', 'Alternative Item updated': 'Alternativna stavka ažurirana', 'Alternative Items': 'Alternativne stavke', 'Alternative places for studying': 'Alternativna mjesta za učenje', 'Alternative places for studying available': 'Dostupna alternativna mjesta za studiranje', 'always update': 'uvijek ažuriraj', 'Ambulance Service': 'Usluge u ambulanti', 'Amount': 'Iznos', 'Amount of the Project Budget spent at this location': 'Iznos budžeta projekta potrošen na ovoj lokaciji', 'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Predložak procjene se može izabrati za kreiranje procjene katastrofe. 
Unutar procjene katastrofe, odgovori se mogu sakupiti a rezultati analizirani kao tabele, dijagrami i mape.', 'An error occured, please %(reload)s the page.': 'Desila se greška, molim %(reload)s stranicu.', 'An ESRI Shapefile (zipped)': 'ESRI indeks datoteka s likovima (kompresovana zip)', 'an individual/team to do in 1-2 days': 'pojedinac-tim da to uradi u 1 do 2 dana', 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Usisni sistem, sistem upravljanja skladšitem, praćenje robe, upravljanje lancem nabavke, nabavka i ostala sredstva, te sposobnosti upravljanja resursima.', 'An interactive map the situation.': 'Interaktivna mapa situacije', 'An Item Category must have a Code OR a Name.': 'Kategorija sredstva mora imati šifru ili ime.', 'An item which can be used in place of another item': 'Stavka koja se može koristiti umjesto druge stavke', 'Analysis of assessments': 'Analiza procjena', 'Analysis of Completed Surveys': 'Analiza kompletiranih anketa', 'Analyze with KeyGraph': 'Analiza pomoću KeyGraph', 'Anamnesis': 'Anamneza', 'and': 'i', 'Andorra': 'Andora', 'angular': 'uglaono', 'Animal Die Off': 'Izumiranje životinja', 'Animal Feed': 'Hrana za životinje', 'Animals': 'Životinje', 'Annual Budget': 'Godišnji budžet', 'Annual Budget deleted': 'Obrisan godišnji budžet', 'Annual Budget updated': 'Ažuriran godišnji budžet', 'Annual Budgets': 'Godišnji budžeti', 'Anonymous': 'Anoniman', 'anonymous user': 'anonimni korisnik', 'Answer Choices (One Per Line)': 'Izbor odgovora (Jedan po liniji)', 'Anthropology': 'Antropologija', 'Antibiotics available': 'Dostupni antibiotici', 'Antibiotics needed per 24h': 'Antibiotici potrebni u 24 sata', 'Antigua and Barbuda': 'Antigua i Barbuda', 'Any': 'Bilo koji', 'ANY': 'BILO KOJE', 'Any comments about this sync partner.': 'Neki komentari o sinhronizacijskom partneru', 'API is documented here': 'Aplikacijski programerski 
interfejs (API) je ovdje dokumentiran', 'API Key': 'API ključ', 'Apparent Age': 'Prividne godine', 'Apparent Gender': 'Vidljiv spol', 'Appearance': 'Izgled', 'Applicable to projects in Pacific countries only': 'Primjenjivo samo na projekte u pacifičkim zemljama', 'Application': 'Aplikacija', 'Application Deadline': 'Zadnji rok za prijavu', 'Application Permissions': 'Aplikacijske dozvole', 'Apply': 'Primijeni', 'Appraisal added': 'Ispunjenje dodano', 'Appraisal deleted': 'Ispunjenje obrisano', 'Appraisal Details': 'Detalji ispunjenja', 'Appraisal updated': 'Ispunjenje ažurirano', 'Appraisals': 'Ispunjenja', 'Appropriate clothing available': 'Odgovarajuća odjeća dostupna', 'Appropriate cooking equipment/materials in HH': 'Odgovarajuća oprema za kuhanje/materijali u domaćinstvu', 'Approve': 'Odobri', 'approved': 'Odobreno', 'Approved': 'Odobreno', 'Approved By': 'Potvrdio', 'Approver': 'Onaj koji odobrava', 'Approx. number of cases/48h': 'Približan broj slučaja u 48 h', 'Approximately how many children under 5 with diarrhea in the past 48 hours?': 'Približno koliko djece s dijarejom ispod 5 godina u zadnjih 48 sati?', 'Arabic': 'Arapski', 'ArcGIS REST Layer': 'ArcGIS REST sloj', 'Archive not Delete': 'Arhiva ne briši', 'Arctic Outflow': 'artički odljev', 'Are breast milk substitutes being used here since the disaster?': 'Da li se koriste zamjene za majčino mlijeko nakon katastrofe?', 'are mandatory and must be filled': 'su obavezna polja i moraju biti popunjena', 'Are there adults living in prisons in this area?': 'Ima li odrazlih u zatvoru u ovom području?', 'Are there alternative places for studying?': 'Postoje li alternativna mjesta za studiranje?', 'Are there cases of diarrhea among children under the age of 5?': 'Ima li slučajeva dijareje među djecom ispod 5 godina?', 'Are there children living in adult prisons in this area?': 'Ima li djece u zatvoru za odrasle u ovom području?', 'Are there children living in boarding schools in this area?': 'Ima li djece u 
internatima u ovom području?', 'Are there children living in homes for disabled children in this area?': 'Ima li djece u kućama za djecu s invaliditetom u ovom području?', 'Are there children living in juvenile detention in this area?': 'Ima li djece u pritvoru za maloljetnike?', 'Are there children living in orphanages in this area?': 'Ima li djece u domovima za napuštenu djecu u ovom području?', 'Are there older people living in care homes in this area?': 'Ima li starijih ljudi u domovima u ovom području?', 'Are there separate latrines for women and men available?': 'Da li su dostupni odvojeni zahodi za žene i muškarce?', 'Are you sure you want to commit to this request and send a shipment?': 'Da li ste sigurni da želite potvrditi ovaj zahtjev i poslati pošiljku', 'Are you sure you want to delete this record?': 'Jeste li sigurni da želite obrisati ovaj zapis?', 'Are you sure you want to send this shipment?': 'Jeste li sigurni da želite poslati ovu pošiljku?', 'Are you susbscribed?': 'Jeste li pretplaćeni?', 'Area': 'Površina', 'Areas inspected': 'Istražena područja', 'Arguments': 'Argumenti', 'Armenia': 'Armenija', 'Arrived': 'Pristiglo', 'artificial': 'vještački', 'Artificial eye left': 'Vještačko lijevo oko', 'As of yet, no sections have been added to this template.': 'Do sada nisu nove sekcije dodate u šablon', 'Assessment': 'Procjena', 'Assessment added': 'Procjena dodana', 'Assessment admin level': 'Nivo administratora procjene', 'Assessment Answer added': 'Dodan odgovor ocjene', 'Assessment Answer deleted': 'Obrisan odgovor ocjene', 'Assessment Answer Details': 'Detalji odgovora ocjene', 'Assessment Answer updated': 'Ažuriran odgovor ocjene', 'Assessment Answers': 'Odgovori ocjene', 'Assessment deleted': 'Procjena je izbrisana', 'Assessment Details': 'Detalji procjene', 'Assessment Question added': 'Pitanja ocjene dodana', 'Assessment Question deleted': 'Pitanja ocjene obrisana', 'Assessment Question Details': 'Detalji pitanja ocjene', 'Assessment Question 
updated': 'Pitanje ocjene ažurirano', 'Assessment Questions': 'Pitanja ocjene', 'Assessment Reported': 'Procjena izvještena', 'Assessment Summaries': 'Kratka procjena', 'Assessment Summary added': 'Dodat rezime procjene', 'Assessment Summary deleted': 'Izbrisan je rezime procjena', 'Assessment Summary Details': 'Detalji sažetka procjene', 'Assessment Summary updated': 'Sažetak procjene ažuriran', 'Assessment Template added': 'Dodan predložak ocjene', 'Assessment Template deleted': 'Obrisan predložak ocjene', 'Assessment Template Details': 'Detalji predloška ocjene', 'Assessment Template updated': 'Ažuriran predložak ocjene', 'Assessment Templates': 'Predlošci ocjene', 'Assessment timeline': 'Procjena vremenskog roka', 'Assessment Type:': 'Vrsta procjene:', 'Assessment updated': 'Ažurirana procjena', 'Assessments': 'Procjene', 'Assessments and Activities': 'Dodjele i aktivnosti', 'Assessments are structured reports done by Professional Organizations': 'Procjene su struktuirani izvještaji koje obavljaju profesionalne organizacije', 'Assessments Needs vs. 
Activities': 'Procjena potreba u usporedbi s aktivnostima', 'Assessments:': 'Procjene:', 'Assessor': 'Procjenitelj', 'Asset': 'Sredstvo', 'Asset added': 'Dodano sredstvo', 'Asset Assignments': 'Dodjela sredstava', 'Asset Assignments deleted': 'Dodjela sredstava je izbrisana', 'Asset deleted': 'Obrisano sredstvo', 'Asset Details': 'Detalji sredstva', 'Asset Item': 'Stavka sredstava', 'Asset Log': 'Zapisnik sredstava', 'Asset Log Details': 'Detalji zapisnika imovine i sredstava', 'Asset Log Empty': 'Zapisnik sredstava je prazan', 'Asset Log Entry Added - Change Label': 'Stavka zapisnika o sredstvu dodana - Promijenite naziv', 'Asset Log Entry deleted': 'Unos sredstva u zapisnik je obrisan', 'Asset Log Entry updated': 'Unos je ažuriran', 'Asset Management': 'Upravljanje sredstvima', 'Asset Number': 'Broj sredstva', 'Asset removed': 'Sredstvo uklonjeno', 'Asset updated': 'Ažurirano sredstvo', 'Assets': 'Materijalno-tehnička Sredstva', 'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Materijalno-tehnička sredstva su resursi koji nisu potrošna roba i očekuje se njihov povrat, stoga je neophodan nadzor.', 'Assign': 'Dodjeli', 'Assign %(staff)s': 'Dodijeli %(staff)s', 'Assign another Role': 'Dodijeli drugu ulogu', 'Assign Asset': 'Dodijeli sredstvo', 'Assign Facility': 'Dodijeli objekat', 'Assign Group': 'Dodijeli grupu', 'Assign Human Resource': 'Dodijeli ljudske resurse', 'Assign Role to a User': 'Dodijeli ulogu korisniku', 'Assign Roles': 'Dodijeli uloge', 'Assign Staff': 'Dodjeli Osoblje', 'Assign Storage Location': 'Dodijeli lokaciju skladišta', 'Assign to Facility/Site': 'Dodijeli objektu/mjestu', 'Assign to Org.': 'Dodijeliti organizaciji', 'Assign to Organisation': 'Dodijeli organizaciji', 'Assign to Organization': 'Dodijeli organizaciji', 'Assign to Person': 'Dodijeli osobi', 'Assign to Site': 'Dodijeli mjestu', 'Assign Vehicle': 'Dodijeli vozila', 'assigned': 'dodijeljen', 'Assigned': 'Dodijeljeno', 'Assigned By': 
'Dodijeljen od strane', 'Assigned Human Resources': 'Dodijeljeni ljudski resursi', 'Assigned Roles': 'Dodijeljene uloge', 'Assigned To': 'Dodjeljen', 'Assigned to': 'Dodijeljen', 'Assigned to Facility/Site': 'Dodijeljeni objeku/mjestu', 'Assigned to Organisation': 'dodijeljen organizaciji', 'Assigned to Organization': 'Dodijeljeno organizaciji', 'Assigned to Person': 'Dodijeljeno Osobi', 'Assigned to Site': 'Pridružen mjestu', 'Assignments': 'Zadaci', 'Assistant': 'Asistent', 'Assisted Family Care': 'Pomoć u porodičnoj njezi', 'Assisted Self-care': 'Pomoć u samonjezi', 'Association': 'Savez', 'At or below %s': 'Na ili ispod %s', 'At/Visited Location (not virtual)': 'Na/posjećena lokacija (ne virtuelno)', 'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 brza procjena modifikovana za New Zealand', 'Attachments': 'Dodaci', 'Attend to information sources as described in <instruction>': 'Pobrinuti se za izvore informacija kao što je opisano u polju <instruction>', 'Attributes': 'Atributi', 'Attribution': 'Pripisivanje', 'Audit Read': 'Prati čitanje', 'Australia': 'Australija', 'Austria': 'Austrija', 'AUTH TOKEN': 'AUTH TOKEN', "Authenticate system's Twitter account": 'Potvrdite Twitter račun sistema', 'Authentication Required': 'Potrebna provjera autentičnosti', 'Author': 'Autor', 'Auto start': 'Samopokretanje', 'Automatic Database Synchronization History': 'Historijat automatske sinhronizacije sa bazom podataka', 'Automotive': 'Samohodni', 'Availability': 'Dostupnost', 'Available': 'Raspoloživo', 'Available Alternative Inventories': 'Dostupne alternativne zalihe', 'Available Beds': 'Dostupni kreveti', 'Available databases and tables': 'Dostupne baze podataka i tabele', 'Available Databases and Tables': 'Dostupne baze podataka i tabele', 'Available for Location': 'Dostupno za lokaciju', 'Available Forms': 'Dostupne forme', 'Available from': 'Na raspolaganju od', 'Available in Viewer?': 'Je li dostupno u pregledniku?', 'Available Inventories': 'Dostupne 
zalihe', 'Available Messages': 'Dostupne Poruke', 'Available Records': 'Dostupni zapisi', 'Available Recovery Reports': 'Dostupni izvještaji o nađenim tijelima', 'Available until': 'Dostupno do', 'Avalanche': 'Lavina', 'average': 'prosjek', 'Average': 'Prosjek', 'Avoid the subject event as per the <instruction>': 'Izbjegni predmet događanja kao po <instruction>', 'Award': 'Nagrada', 'Award added': 'Nagrada dodana', 'Award deleted': 'Nagrada obrisana', 'Award updated': 'Nagrada ažurirana', 'Awards': 'Nagrade', 'Awareness raising': 'Podizanje obaviještenosti', 'Azerbaijan': 'Azerbejdžan', 'Babies who are not being breastfed, what are they being fed on?': 'Bebe koje nisu dojene, na koji su način hranjene?', 'Baby And Child Care': 'Bebe i briga za djecu', 'Back to Roles List': 'Nazad na listu uloga', 'Back to Top': 'Nazad na vrh', 'Back to Users List': 'Nazad na listu korisnika', 'Background Color': 'Pozadinska boja', 'Background Colour': 'Boja pozadine', 'Background Colour for Text blocks': 'Boja pozadine za tekstualne blokove', 'Bahai': 'Bahai', 'Bahamas': 'Bahami', 'Bahrain': 'Bahrein', 'Baldness': 'ćelavost', 'Banana': 'Banana', 'Bangladesh': 'Bangladeš', 'Bank/micro finance': 'Banka/mikrokreditna organizacija', 'Barge Capacity': 'Kapacitet skele', 'Barricades are needed': 'Potrebne su barikade', 'Base %(facility)s Set': 'Baza %(facility)s postavljena', 'Base Facility/Site Set': 'Postavljeno mjesto/objekt baze', 'Base Layer?': 'Osnovni sloj?', 'Base Layers': 'Osnovni slojevi', 'Base Location': 'Osnovna lokacija', 'Base Location Updated!': 'Lokacija baze ažurirana!', 'Base Site Set': 'Postavljeno mjesto baze', 'Base Station added': 'Sodana bazna stanica', 'Base Station deleted': 'Obrisana bazna stanica', 'Base Station Details': 'Detalji bazne stanice', 'Base Station updated': 'Ažurirana bazna stanica', 'Base Stations': 'Bazne stanice', 'Base Unit': 'Bazna jedinica', 'Base URL of the remote Sahana Eden instance including application path, e.g. 
http://www.example.org/eden': 'Bazni URL udaljene Sahana Eden instance uključujući stazu aplikacije, npr. http://www.example.org/eden', 'Baseline added': 'referentna tačka dodana', 'Baseline Data': 'Referentni podaci', 'Baseline deleted': 'Referentna tačka je izbrisana', 'Baseline Number of Beds': 'Bazni broj kreveta', 'Baseline number of beds of that type in this unit.': 'referentni broj kreveta tog tipa u ovoj jedinici', 'Baseline Type': 'Vrsta referentne tačku', 'Baseline Type added': 'Tip referentne tačke dodan', 'Baseline Type deleted': 'Izbrisan tip referentne tačke', 'Baseline Type Details': 'Detalji tipa referentne tačke', 'Baseline Type updated': 'Tip referentne tačke je ažuriran', 'Baseline Types': 'Tip referentne tačke', 'Baseline updated': 'Izmijenjena referentna tačka', 'Baselines': 'Referentne tačke', 'Baselines Details': 'Detalji referentne tačke', 'Basic Assessment': 'Osnovna procjena', 'Basic Assessment Reported': 'Osnovna procjena prijavljena', 'Basic Details': 'Osnovni detalji', 'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Osnovne informacije o zahtjevima i donacijama, kao što su kategorija, jedinice, kontaktni detalji i status-', 'Basic medical supplies available prior to disaster': 'Osnovna medicinska podrška dostupna prije katastrove', 'Basic medical supplies available since disaster': 'Osnovna medicinska podrška dostupna nakon katastrove', 'Basic reports on the Shelter and drill-down by region': 'Osnovni izvještaji o skloništu i dublja analiza po regijama', 'Baud': 'Baud', 'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate korišten za Vaš modem - Zadano je sigurno za većinu slučajeva', 'BDRT (Branch disaster response teams)': 'BDRT (Odgovorni timovi ogranka u slučaju katastrofe)', 'Beam': 'Zraka', 'Bed Capacity': 'Krevetni kapaciteti', 'Bed Capacity per Unit': 'Kapacitet kreveta po jedinici', 'Bed Type': 'TIp ležaja', 'Bed type already 
registered': 'Tip kreveta već registriran', 'Bedding materials available': 'Dostupni materijali posteljine', 'beginning': 'početak', 'Belarus': 'Bjelorusija', 'Belgium': 'Belgija', 'belongs to': 'pripada u', 'Below ground level': 'Ispod nivoa tla', 'Beneficiaries': 'Korisnici', 'Beneficiaries Added': 'Korisnici dodani', 'Beneficiaries Deleted': 'Korisnici izbrisani', 'Beneficiaries Details': 'Detalji korisnika', 'Beneficiaries Updated': 'Korisnici ažurirani', 'Beneficiary': 'Korisnik', 'Beneficiary Report': 'Izvještaj o korisnicima', 'Beneficiary Type': 'Tip korisnika', 'Beneficiary Type Added': 'Dodan tip korisnika', 'Beneficiary Type Deleted': 'Izbrisan tip korisnika', 'Beneficiary Type Updated': 'Ažuriran tip korisnika', 'Beneficiary Types': 'Tipovi korisnika', 'Bhuddist': 'Bhudist', 'Bhutan': 'Butan', 'Big Capacity Tank Vehicle': 'Vozilo rezervoar velikog kapaciteta', 'Bilateral': 'Dvostrana', 'Bin': 'Korpa', 'Bing Layer': 'Bing sloj', "Bing Layers cannot be displayed if there isn't a valid API Key": 'Bing slojevi ne mogu biti prikazani ako nije ispravan API ključ', 'Biological Hazard': 'Biološke opasnosti', 'Biscuits': 'Keks', 'black': 'crna', 'Blizzard': 'Mećava', 'Blocked': 'Blokirano', 'blond': 'plavokosa', 'Blood Type (AB0)': 'Krvna grupa (AB0)', 'Blowing Snow': 'Mećava', 'blue': 'plavo', 'Boat': 'Čamac', 'Bodies': 'Tijela', 'Bodies found': 'Pronađena tijela', 'Bodies recovered': 'Pronađena tijela', 'Bodily Constitution': 'Tjelesna konstitucija', 'Body': 'Tijelo', 'Body Finds': 'Nađena tijela', 'Body Hair': 'Dlake po tijelu', 'Body hair, Colour': 'Dlake po tijelu, boja', 'Body hair, Extent': 'Dlake po tijelu, dužina', 'Body Recovery': 'Izvlačenje tijela', 'Body Recovery Request': 'Zahtjev za izvlačenje tijela', 'Body Recovery Requests': 'Zahtjeci za izvlačenje tijela', 'Bolivia': 'Bolivija', 'Bomb': 'Bomba', 'Bomb Explosion': 'Eksplozija bombe', 'Bomb Threat': 'Prijetnja bombom', 'Border Colour for Text blocks': 'Boja rubova tekstualnih polja', 'Bosnia and 
Herzegovina': 'Bosna i Herzegovina', 'Both': 'Oboje', 'Botswana': 'Bocvana', 'Bounding Box Insets': 'Nacrti okvirne kutije', 'Bounding Box Size': 'Velilčina ambalažne kutije', 'box': 'kutija', 'Boys 13-18 yrs in affected area': 'Dječaci 13-18 god u pogođenom području', 'Boys 13-18 yrs not attending school': 'Dječaci 13-18 godina koji ne pohađaju školu', 'Boys 6-12 yrs in affected area': 'Dječaci 6-12 godina u zahvaćenim područjima', 'Boys 6-12 yrs not attending school': 'Dječaci 6-12 godina koje ne pohađaju školu', 'Branch': 'Ogranak', 'Branch Coordinator': 'Koordinator ogranka', 'Branch Organization added': 'Dodan ogranak organizacije', 'Branch Organization deleted': 'Obrisan ogranak organizacije', 'Branch Organization Details': 'Detalji ogranka organizacije', 'Branch Organization updated': 'Ažuriran ogranak organizacije', 'Branch Organizations': 'ogranci organizacije', 'Branches': 'Ogranci', 'Brand': 'Marka', 'Brand added': 'Marka dodana', 'Brand deleted': 'Marka obrisana', 'Brand Details': 'Detalji marke', 'Brand updated': 'Marka ažurirana', 'Brands': 'Marke', 'Breakdown': 'Prekid', 'Breast milk substitutes in use since disaster': 'Zamjene za majčino mlijeko korištene nakon katastrofe', 'Breast milk substitutes used prior to disaster': 'Korištene zamjene za majčino mlijeko prije katastrofe', 'Bricks': 'Cigle', 'Bridge Closed': 'Most zatvoren', 'broad': 'široko', 'brown': 'smeđa', 'Brunei': 'Brunej', 'Bucket': 'Kanta', 'Buddhist': 'Budist', 'Budget': 'Budžet', 'Budget added': 'Dodat budžet', 'Budget deleted': 'Budžet obrisan', 'Budget Details': 'Detalji Budzeta', 'Budget Updated': 'Budžet Ažuriran', 'Budget updated': 'Budžet ažuriran', 'Budgeting Module': 'Modul za budžetiranje', 'Budgets': 'Budžeti', 'Buffer': 'Spremnik', 'Bug': 'Buba', 'Building Assessments': 'Procjene građevina', 'Building Collapsed': 'Zgrada srušena', 'Building Name': 'Ime zgrade', 'Building or storey leaning': 'Zgrada ili sprat su nageti', 'Building Safety Assessments': 'Procjena sigurnosti 
objekta', 'Building Short Name/Business Name': 'Ime zgrade/biznisa', 'Built using the Template agreed by a group of NGOs working together as the': 'Izgrađeno koristeći šablon kreiran od strane grupe NVO radeći zajedno kao', 'Bulgaria': 'Bugarska', 'Bulk Uploader': 'Masovni prenos', 'Bundle': 'Paket', 'Bundle added': 'Paket dodan', 'Bundle Contents': 'Sadržaji paketa', 'Bundle deleted': 'Paket obrisan', 'Bundle Details': 'Detalji paketa', 'Bundle Updated': 'Paket ažuriran', 'Bundle updated': 'Paket je ažuriran', 'Bundles': 'Svežnji', 'Bunion': 'Kriv nožni palac', 'Burn': 'Spaljeno', 'Burn ICU': 'Spaljen ICU', 'Burned/charred': 'Spaljeno/ugljenisano', 'Business damaged': 'Oštećenje industrije', 'Button name': 'Ime dugmeta', 'by': 'od strane', 'by %(person)s': 'od %(person)s', 'By %(site)s': 'Po %(site)s', 'By Facility': 'Po objektu', 'By Inventory': 'Po skladištu', 'By selecting this you agree that we may contact you.': 'Izborom ovoga slažete se da vas možemo kontaktirati.', 'By Site': 'Po mjestu', 'By Warehouse': 'NEBESKO SKLADIŠTE', 'c/o Name': 'c/o Ime', 'Cache': 'Keš', 'Cache Keys': 'Ključevi za gotovinu', 'Calculate': 'Izračunaj', 'Calendar': 'Kalendar', 'Cambodia': 'Kampučija', 'Cameroon': 'Kamerun', 'Camp': 'Kamp', 'Camp added': 'Dodan kamp', 'Camp Coordination/Management': 'Koordinacija kampa/Menadžment', 'Camp deleted': 'Obrisan kamp', 'Camp Details': 'Detalji o kampu', 'Camp Service': 'Usluga kampa', 'Camp Service added': 'Dodana je uluga kampa', 'Camp Service deleted': 'Obrisana je usluga kampa', 'Camp Service Details': 'Detalji o uslugama kampa', 'Camp Service updated': 'Ažurirana je usluga kampa', 'Camp Services': 'Usluge kampa', 'Camp Status': 'Status kampa', 'Camp Status added': 'Dodana je status kampa', 'Camp Status deleted': 'Obrisan je status kampa', 'Camp Status Details': 'Detalji statusa kampa', 'Camp Status updated': 'Ažuriran je status kampa', 'Camp Statuses': 'Statusi kampa', 'Camp Type': 'Tip kampa', 'Camp Type added': 'Tip kampa dodan', 'Camp 
Type deleted': 'Tip kampa obrisan', 'Camp Type Details': 'Detalji tipa kampa', 'Camp Type updated': 'Tip kampa ažuriran', 'Camp Types': 'Vrste kampa', 'Camp Types and Services': 'Tipovi i usluge kampova', 'Camp updated': 'Ažuriran kamp', 'Campaign': 'Kampanja', 'Campaign Added': 'Kampanja dodana', 'Campaign Deleted': 'Kampanja izbrisana', 'Campaign ID': 'ID kampanje', 'Campaign Message': 'Poruka kampanje', 'Campaign Message Added': 'Dodana poruka kampanje', 'Campaign Message Deleted': 'Obrisana poruka kampanje', 'Campaign Message Updated': 'Ažurirana poruka kampanje', 'Campaign Messages': 'Poruke kampanje', 'Campaign Updated': 'Kampanja ažurirana', 'Campaigns': 'Kampanje', 'Camps': 'Kampovi', 'Can be grouped together into Feature Groups': 'Mogu se grupisati u grupe karakteristika', 'can be used to extract data from spreadsheets and put them into database tables.': 'može se koristiti za izvlačenje podataka iz tabelarnog prikaza i stavljanje istih u tabele baza podataka.', 'Can only approve 1 record at a time!': 'Moguće je potvrditi samo jedan zapis istovremeno!', 'Can only disable 1 record at a time!': 'Moguće je onemogućiti samo 1 zapis trenutno!', 'Can only enable 1 record at a time!': 'Omogućen je samo jedan zapis istovremeno!', 'Can only update 1 record at a time!': 'Moguće je ažurirati samo jedan zapis istovremeno!', 'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Može čitati tačke interesa iz OpenStreetMap datoteke (.osm) ili rezervnog servera.', "Can't import tweepy": 'Nemoguće unijeti tweepy', 'Canada': 'Kanada', 'Cancel': 'Otkaži', 'Cancel Crop': 'Otkaži rezanej', 'Cancel editing': 'Otkaži uređivanje', 'Cancel Log Entry': 'Otkaži stavku zapisnika', 'Cancel Shipment': 'Otkazati pošiljku', 'Canceled': 'Otkazano', 'Candidate Matches for Body %(label)s': 'Kandidat odgovara tijelu %(label)s', 'Candidate Matches for Body %s': 'Kandidat odgovara tijelu %s', 'Canned Fish': 'Konzervirana riba', 'cannot be deleted.': 'ne može se obrisati.', 
'Cannot be empty': 'Ne može biti prazno', 'Cannot disable your own account!': 'Ne možete onesposobiti svoj račun!', 'Cannot make an Organization a branch of itself!': 'Ne može se napraviti organizacija koja je vlastiti ogranak!', 'Cannot open created OSM file!': 'Ne mogu otvoriti kreiranu OSM datoteku!', 'Cannot read from file: %(filename)s': 'Ne mogu pročitati iz datoteke: %(filename)s', 'Cannot send messages if Messaging module disabled': 'Ne mogu se slati poruke ako je modul za poruke isključen', 'Capacity (Day / Evacuation)': 'Kapacitet (Dan / Evakuacija)', 'Capacity (Day and Night)': 'Capacity (Day and Night)', 'Capacity (Day)': 'Kapacitet (dan)', 'Capacity (Max Persons)': 'Kapacitet (maksimalan broj osoba)', 'Capacity (Night / Post-Impact)': 'Kapacitet (Noć / nakon utjecaja)', 'Capacity (Night only)': 'Capacity (Night only)', 'Capacity (Night)': 'Kapacitet (noć)', 'Capacity (W x D X H)': 'Kapacitet (Š x D x V)', 'Capacity Building': 'Kapacitet zgrada', 'Cape Verde': 'Zelenortska Ostrva', 'Capture Contact Information': 'Dohvati informacije o kontaktu', 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Snimi informacije o grupama žrtava nesreće (turisti, putnici, porodice, itd.)', 'Capture Information on each disaster victim': 'Unesi informacije o svakoj žrtvi katastrofe', 'Capturing the projects each organization is providing and where': 'Bilježenje projekata koje svaka organizacija omogućava i gdje', 'Card holder': 'Vlasnik kartice', 'Cardiology': 'Kardiologija', 'Cargo Pier Depth': 'Dubina mola za teret', 'Case added': 'Dodan slučaj', 'Case deleted': 'Obrisan slučaj', 'Case Details': 'Detalji slučaja', 'Case Number': 'Broj slučaja', 'Case updated': 'Ažuriran slučaj', 'Cases': 'Slučajevi', 'Cash available to restart business': 'Gotovina dostupna za ponovni početak posla', 'Cassava': 'Tropska biljka manioka', 'Casual Labor': 'Obični rad', 'Casualties': 'Gubici', 'Catalog': 'Katalog', 'Catalog added': 'Katalog dodan', 
'Catalog deleted': 'Katalog obrisan', 'Catalog Details': 'Detalji o katalogu', 'Catalog Item': 'Stavka kataloga', 'Catalog Item added': 'Dodata stavka u katalog', 'Catalog Item deleted': 'Obrisana stavka iz katalog', 'Catalog Item updated': 'Ažurirana stavka u katalog', 'Catalog Items': 'stavke kataloga', 'Catalog Name': 'Ime kataloga', 'Catalog updated': 'Katalog ažuriran', 'Catalogs': 'Katalozi', 'Categories': 'Kategorije', 'Category': 'Kategorija', 'Category:': 'Kategorija:', 'Category<>Sub-Category<>Catalog Relation added': 'Kategorija<>Podkategorija<>kataloški odnos dodan', 'Category<>Sub-Category<>Catalog Relation updated': 'Kategorija<>Podkategorija<>kataloški odnos ažuriran', 'caucasoid': 'bjelačka', "Caution: doesn't respect the framework rules!": 'Upozorenje: nepoštivanje okvirnih pravila!', 'CBA Women': 'CBA žena', 'CDRT (Community disaster response teams)': 'CDRT (Timovi zajednice za odgovore u slučaju katastrofe)', 'Ceilings, light fixtures': 'Stropovi, popravke svjetla', 'Cell Phone': 'Mobilni telefon', 'Cell Tower': 'Ćelijski toranj', 'Central African Republic': 'Centralnoafrička Republka', 'Central point to record details on People': 'Centralna lokacija za bilježenje detalja o ljudima', 'Certificate': 'Certifikat', 'Certificate added': 'Dodat certifikat', 'Certificate Catalog': 'Katalog Certifikata', 'Certificate deleted': 'Obrisan certifikat', 'Certificate Details': 'Detalji o certifikatu', 'Certificate Status': 'Status certifikata', 'Certificate updated': 'Ažuriran certifikat', 'Certificates': 'Certifikati', 'Certification': 'Certificiranje', 'Certification added': 'Dodan certifikat', 'Certification deleted': 'Obrisana certifikacija', 'Certification Details': 'Detalji certifikacije', 'Certification updated': 'Ažurirana certifikacija', 'Certifications': 'Certifikati', 'Certifying Organization': 'Organizacija koja daje certifikat', 'Chad': 'Čad', 'Change Password': 'Promijeni lozinku', 'Channel': 'Kanal', 'Chart': 'Grafikon', 'Chat on IRC': 
'Ćaskanje na IRC', 'Check': 'Provjera', 'check all': 'označi sve', 'Check all': 'Provjeri sve', 'Check for errors in the URL, maybe the address was mistyped.': 'Pogledajte greške na URL , možda je došlo do greške pri kucanju.', 'Check if the URL is pointing to a directory instead of a webpage.': 'Provjeri da li URL pokazuje na direktorij umjesto na stranicu', 'Check outbox for the message status': 'Provjerite izlaznu poštu za status poruke', 'Check Request': 'Provjerite zahtjev', 'Check this to make your search viewable by others.': 'Označite ovo da vaša pretraga bude vidljiva ostavlim', 'Check to delete': 'Označi za brisanje', 'Check to delete:': 'Označi z abrisanje', 'Check-In': 'Ubaci', 'Check-in at Facility': 'Označi na objektu', 'Check-Out': 'Izdvoji', 'Checked': 'Provjereno', 'checked': 'provjereno', 'Checked-In successfully!': 'Uspješno ubačeno', 'Checked-Out successfully!': 'Uspješno izdvojeno', 'Checklist': 'Spisak', 'Checklist created': 'Kontrolni spisak kreiran', 'Checklist deleted': 'Lista zadataka obrisana', 'Checklist Item': 'Zadatak u listi', 'Checklist of Operations': 'Lista operacija', 'Checklist updated': 'Ažurirana lista zadataka', 'Checklists': 'Liste zadataka', 'Chemical Hazard': 'Hemijska opasnost', 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Hemijske, biološke, radiološke, nuklearne ili visoko-prinosne eksplozivne prijetnje ili napadi', 'Chewing tobacco': 'Duhan za žvakanje', 'Chicken': 'Pilići', 'Child': 'Dijete', 'Child (2-11)': 'Dijete (2-11)', 'Child (< 18 yrs)': 'dijete(<18 godina)', 'Child Abduction Emergency': 'Hitan slučaj otmice djeteta', 'Child headed households (<18 yrs)': 'Dijete na čelu domaćinstva (<18 god)', 'Children (2-5 years)': 'Djeca (2-5 godina)', 'Children (5-15 years)': 'Djeca (5-15 godina)', 'Children (< 2 years)': 'Djeca (mlađa od 2 godine)', 'Children in adult prisons': 'Djeca u zatvorima za odrasle', 'Children in boarding schools': 'Djeca u internatima', 'Children in 
homes for disabled children': 'Djeca u kućama za djecu s invaliditetom', 'Children in juvenile detention': 'Djeca u pritvoru za maloljetnike', 'Children in orphanages': 'Djeca u sirotištu', 'Children living on their own (without adults)': 'Djeca koja žive sama (bez staratelja)', 'Children not enrolled in new school': 'Djeca koja nisu upisana o novu školu', 'Children orphaned by the disaster': 'Djeca koja su siročad zbog katastrofe', 'Children separated from their parents/caregivers': 'Djeca odvojena od svojih roditelja/staratelja', 'Children that have been sent to safe places': 'Djeca koja su poslana na sigurna mjesta', 'Children who have disappeared since the disaster': 'Djeca nestala nakon katastrofe', 'Children with chronical illnesses': 'Djeca s hroničnim bolestima', "Children's Education": 'Obrazovanje djece', 'Chile': 'Čile', 'Chin, Inclination': 'Brada, nagib', 'Chin, Shape': 'Brada, oblik', 'Chin, Size': 'Brada, veličina', 'China': 'Kina', 'Chinese': 'Kineski', 'Chinese (Simplified)': 'Kineski (pojednostavljen)', 'Chinese (Taiwan)': 'Kineski (Tajvan)', 'Cholera Treatment': 'Tretman kolere', 'Cholera Treatment Capability': 'Sposobnost tretmana kolere', 'Cholera Treatment Center': 'Centar za tretman kolere', 'Cholera-Treatment-Center': 'Centar za liječenje kolere', 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Izaberi novo slanje bazirano na novim vrednovanjima i procjeni tima. Teški uslovi koji utiču na cijelu zgradu su temeljni za NESIGURNO postavljanje. Lokalizirano teški i pretežno umjereni uslovi mogu zahtjevati OGRANIČENO korištenje. Stavite UOČLJIV plakat na glavni ulaz. 
Postavite sve ostale plakate na sve značajnije ulaze.', 'Choose Country': 'Izaberite državu', 'Choose Manually': 'Izaberi ručno', 'Choosing Skill and Resources of Volunteers': 'Izbor vještina i resursa volontera', 'Christian': 'Kršćanin', 'Church': 'Crkva', 'Cigarettes': 'Cigarete', 'Cigars': 'Cigare', 'circular': 'kružno', 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Okolnosi nestanka, druge žrtve/svjedoci koji su zadnji vidjeli živu osobu.', 'City': 'grad', 'City / Town / Village': 'Općina/Mjesto', 'Civil Emergency': 'Civilno izvanredno stanje', 'Cladding, glazing': 'Oblaganje, glačanje', 'Clean Instance': 'Čista kopija', 'clear': 'čisto', 'Clear All': 'Obriši sve', 'Clear all Layers': 'Očisti sve slojeve', 'Clear CACHE?': 'Obrisati predmemoriju?', 'Clear DISK': 'Obriši na disku', 'Clear filter': 'Očisti filter', 'Clear RAM': 'Obriši u memoriji', 'Clear Selection': 'Obriši izbor', 'Cleft chin': 'Rupica na bradi', "Click 'Start' to synchronize with this repository now:": "Kliknite 'Start' za sinhronizaciju s ovim repozitorijem sada:", 'click for more details': 'pritisni za više detalja', 'click here': 'kliknite ovdje', 'Click on a marker to see the Completed Assessment Form': 'Kliknite na marker da vidite formular za završenu ocjenu', "Click on questions below to select them, then click 'Display Selected Questions' button to view the selected questions for all Completed Assessment Forms": "Kliknite na pitanja ispod da ih odaberete, zatim kliknite na 'Prikaži izabrana pitanja' dugme da vidite izabrana pitanja za sve završene formulare procjene.", 'Click on the chart to show/hide the form.': 'Kliknite na dijagram za prikaz/sakrivanje formulara', 'Click on the link': 'Kliknite na link', 'Click on the slider to choose a value': 'Kliknite na klizač za izbor vrijednosti', 'Click to edit': 'Kliknite da uredite', 'Click where you want to open Streetview': 'Kliknite gdje želite otvoriti Streetview', 'Client ID': 
'Identifikacija korisnika', 'Client IP': 'IP klijenta', 'Client Secret': 'Tajni ključ korisnika ', 'Climate': 'Klima', 'Climate change mitigation': 'Ograničenja izmjena klime', 'Climate change preparednes': 'Pripreme na izmjene klime', 'Clinical Laboratory': 'Klinički laboratorij', 'Clinical Operations': 'Kliničke operacije', 'Clinical Status': 'Klinički status', 'Close': 'Zatvori', 'Close map': 'Zatvori mapu', 'Closed': 'Zatvoreno', 'CLOSED': 'ZATVORENO', 'Closed?': 'Zatvoreno?', 'Closure': 'Zatvaranje', 'Clothing': 'Odjeća', 'Cluster': 'Skup', 'Cluster added': 'Skup dodan', 'Cluster Attribute': 'Atribut skupa', 'Cluster deleted': 'Skup obrisan', 'Cluster Details': 'Detalji skupa', 'Cluster Distance': 'Udaljenost skupova', 'Cluster Subsector': 'Podsektor skupa', 'Cluster Subsector added': 'Podsektor skupa dodan', 'Cluster Subsector deleted': 'Podsektor skupa obrisan', 'Cluster Subsector Details': 'Detalji podsektora skupa', 'Cluster Subsector updated': 'Podsektor skupa ažuriran', 'Cluster Subsectors': 'Podsektori skupa', 'Cluster Threshold': 'Prag skupa', 'Cluster updated': 'Skup ažuriran', 'Cluster(s)': 'Skup(ovi)', 'Clusters': 'Skupovi', 'CN': 'CN', 'Coalition added': 'Koalicija dodana', 'Coalition Details': 'Detalji koalicije', 'Coalition removed': 'Uklonjena koalicija', 'Coalition updated': 'Koalicija ažurirana', 'Coalitions': 'Koalicije', 'Code': 'Šifra', 'Code Share': 'Dijeljenja koda', 'Code:': 'Kôd:', 'Cold Wave': 'Hladni talas', 'Collapse, partial collapse, off foundation': 'Kolaps, djelimični kolaps, pomjereni temelji', 'collateral event': 'kolateralni događaj', 'Collective center': 'Kolektivni centar', 'Colombia': 'Kolumbija', 'Colour for Underline of Subheadings': 'Boja za podvlačenje podnaslova', 'Colour of bottom of Buttons when not pressed': 'Boja dna dugmadi kada nisu pritisnuta', 'Colour of bottom of Buttons when pressed': 'Boja dna tastera kada je pritisnut', 'Colour of Buttons when hovering': 'Boja dugmadi kada se prelijeću', 'Colour of dropdown 
menus': 'Boja padajućih menija', 'Colour of selected Input fields': 'Boja selektovanih polja za unos', 'Colour of selected menu items': 'Boja označenih stavki meni-a', 'Column Choices (One Per Line': 'Izbor kolona (Jedan po liniji)', 'Columns, pilasters, corbels': 'Stubovi, pilastri, korbali', 'Combined Method': 'Kombinovana metoda', 'Come back later.': 'Vratite se poslije.', 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Pokušajte kasnije. Svi koji posjećuju ovaj sajt vjerojatno imaju isti problem kao i vi.', 'Command Tactical Operational Vehicle': 'Komandno taktičko radno vozilo', 'Comment': 'Komentar', 'Comments': 'Komentari', 'Comments permitted?': 'Komentarisanje dozvoljeno', 'Commercial/Offices': 'Poslovni/Uredi', 'Commit': 'Izvrši', 'Commit All': 'Potvrdi sve', 'Commit Date': 'Datum izvršenja', 'Commit from %s': 'Izvrši od %s', 'Commit Status': 'Status potvrđivanja', 'Commit. Status': 'Status zaduženja', 'Commiting a changed spreadsheet to the database': 'Predavanje izmijenjenog tabelarnog prikaza bazi podataka', 'Commitment': 'Zaduženje', 'Commitment Added': 'Zaduženje dodano', 'Commitment Canceled': 'Zaduženje otkazano', 'Commitment Details': 'Detalji o zaduženjima', 'Commitment Item': 'Stavka angažovanja', 'Commitment Item added': 'Stavka zaduženja dodana', 'Commitment Item deleted': 'Stavka zaduženja obrisana', 'Commitment Item Details': 'Detalji o zaduženju', 'Commitment Item updated': 'Stavka zaduženja ažurirana', 'Commitment Items': 'Stavke zaduženja', 'Commitment Status': 'Status zaduženja', 'Commitment Updated': 'Zaduženje ažurirano', 'Commitments': 'Zaduženja', 'Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Zaduženja mogu biti napravljena prema ovim zahtjevima, ali ona ostaju otvorena dok zahtjevaoc ne potvrdi da je zahtjev kompletan.', 'Committed': 'Zaduženo', 'Committed By': 'Zaduženo od strane', 
'Committed Items': 'Zadužene stavke', 'Committed People': 'Zaduženo osoblje', 'Committed People Details': 'Detalji o zaduženoj osobi', 'Committed People updated': 'Zadužene osobe ažurirane', 'Committed Person Details': 'Detalji o zaduženoj osobi', 'Committed Person updated': 'Zadužena osoba ažurirana', 'Committing Inventory': 'Predavanje inventara', 'Committing Organization': 'Izvršna organizacija', 'Committing Person': 'Izvršna osoba', 'Committing Warehouse': 'Zadužena skladišta', 'Commodities Loaded': 'Roba natovarena', 'Communication problems': 'Komunikacijski problemi', 'Communities': 'Zajednice', 'Community': 'Zajednica', 'Community Added': 'Zajednica dodana', 'Community Based Health and First Aid (CBHFA)': 'Zdravstvo i prva pomoć koju organizuje društvena zajednica (CBHFA)', 'Community Centre': 'Mjesna zajednica', 'Community Contacts': 'Kontakt podaci zajednice', 'Community Deleted': 'Zajednica obrisana', 'Community Details': 'Detalji zajednice', 'Community Health Center': 'Dom zdravlja', 'Community Member': 'Član zajednice', 'Community organisation': 'Organizacija zajednice', 'Community Updated': 'Zajednica ažurirana', 'Comoros': 'Komori', 'Company': 'Preduzeće', 'Competencies': 'sposobnosti', 'Competency': 'Sposobnost', 'Competency added': 'Stručnosti dodane', 'Competency deleted': 'Stručnost obrisana', 'Competency Details': 'Detalji o sposobnostima', 'Competency Rating': 'Nivo spremnosti', 'Competency Rating added': 'Ocjena stručnosti dodana', 'Competency Rating Catalog': 'Katalog ocjena stručnosti', 'Competency Rating deleted': 'Ocjena stručnosti obrisana', 'Competency Rating Details': 'Detalji statusa spremnosti', 'Competency Rating updated': 'Ocjena stručnosti ažurirana', 'Competency Ratings': 'Ocjene sposobnosti', 'Competency updated': 'Stručnost je ažurirana', 'Complete': 'Završeno', 'Complete Adjustment': 'Završi podešavanje', 'Complete Database Synchronized': 'Kompletna baza podataka sinhronizovana', 'Complete Returns': 'Završena vraćanja', 
'Complete Unit Label for e.g. meter for m.': 'Puno ime jedinice, npr metar za m.', 'Complete? Please call': 'Završeno? Molim pozovite', 'Completed': 'Završeno', 'completed': 'završeno', 'Completed Assessment Form deleted': 'Formular završene procjene obrisan', 'Completed Assessment Form Details': 'Detalji završenog formulara ocjene', 'Completed Assessment Form entered': 'Unesen završen formular ocjene', 'Completed Assessment Form updated': 'Brza procjena ažurirana', 'Completed Assessment Forms': 'Završeni formulari ocjene', 'Completed Assessments': 'Završene ocjene', 'Completed tour?': 'Završena tura', 'Completion Question': 'Pitanje završavanja', 'Complexion': 'Ten', 'Compose': 'Sastavi', 'Compromised': 'Kompromitirano', 'concave': 'konkavni', 'Concrete frame': 'Betonski okvir', 'Concrete shear wall': 'Betonsko smicanje zida', 'Condition': 'Stanje', 'Conduct a Disaster Assessment': 'Obavi procjenu katastrofe', 'Config': 'Konfiguracija', 'Config added': 'Konfiguracija dodana', 'Config deleted': 'Konfiguracija izbrisana', 'Config Details': 'Detalji konfiguracije', 'Config not found!': 'Konfiguracija nije nađena!', 'Config updated': 'Konfiguracija ažurirana', 'Configs': 'Konfiguracije', 'Configuration': 'Konfiguracija', 'Configurations': 'Konfiguracije', 'Configure connection details and authentication': 'Konfigurišite detalje o povezivanju i autentifikaciju', 'Configure Layer for this Symbology': 'Konfigurišite sloj za ovo značenje simbola', 'Configure resources to synchronize, update methods and policies': 'Konfigurišite resurse za sinhronizaciju, metode ažuriranja i politike', 'Configure Run-time Settings': 'Konfiguriši izvršne postavke', 'Configure the default proxy server to connect to remote repositories': 'Konfigurišite podrazumijevani proxy server za vezu s udaljenim repozitorijima', 'Configure/Monitor Synchronization': 'Konfiguriši/prati sinhronizaciju', 'Confirm Shipment Received': 'Potvrdite primljenu pošiljku', 'Confirm that some items were returned from a 
delivery to beneficiaries and they will be accepted back into stock.': 'Potvrdite da su neki artikli vraćeni od korisnika i da će biti prihvaćeni nazad u skladište.', 'Confirm that the shipment has been received by a destination which will not record the shipment directly into the system and confirmed as received.': 'Potvrdite da je dostava stigla na odredište koje neće bilježiti dostavu direktno u sistem i da je potvrđeno kao primljeno.', 'Confirmed': 'Potvrđeno', 'confirmed': 'potvrđeno', 'Confirmed Incidents': 'Potvrđen incident', 'Confirming Organization': 'Organizacija koja potvrđuje', 'Conflict Details': 'Detalji sukoba', 'Conflict Policy': 'Politika konflikta', 'Conflict Resolution': 'Razrješenje konflikta', 'Congo, Democratic Republic of the (Congo-Kinshasa)': 'Kongo, Demokratska Republika (Zair)', 'Congo, Republic of the (Congo-Brazzaville)': 'Kongo, Republika (Brazzaville)', 'Connect Parser': 'Parser konekcija', 'consider': 'razmotri', 'Consignment Note': 'Sprovodni list', 'Consignment Number, Tracking Number, etc': 'Konsignacijski broj, praćeni broj itd.', 'constraint_id': 'ogranicenje_id', 'Constraints Only': 'Samo ograničenja', 'Consumable': 'Potrošni', 'Contact': 'Kontakt osoba', 'Contact Added': 'Informacije o kontaktu su dodane', 'Contact added': 'Informacije o kontaktu su dodane', 'Contact Data': 'Kontakt podaci', 'Contact deleted': 'Kontakt obrisan', 'Contact Deleted': 'Izbrisan kontakt', 'Contact details': 'Detalji o kontaktu', 'Contact Details': 'Detalji o kontaktu', 'Contact Details updated': 'Informacije o kontaktu su ažurirane', 'Contact Info': 'Kontakt podaci', 'Contact Information': 'Kontakt informacije', 'Contact Information Added': 'Informacije o kontaktu su unesene', 'Contact information added': 'Dodata kontakt informacija', 'Contact Information Deleted': 'Izbrisane informacije o kontaktu', 'Contact information deleted': 'Obrisana kontakt informacija', 'Contact Information Updated': 'Informacije o kontaktu ažurirane', 'Contact 
information updated': 'Ažurirana kontakt informacija', 'Contact Method': 'Način kontakta', 'Contact Name': 'Ime kontakt osobe', 'Contact People': 'Kontakt osobe', 'Contact Person': 'Kontakt osoba', 'Contact Phone': 'Kontakt telefon', 'Contact Updated': 'Ažurirane kontakt informacije', 'Contact Us': 'Kontaktirajte nas', 'Contact us': 'Kontaktirajte nas', 'Contacts': 'Kontakti', 'Contacts:': 'Kontakti:', 'Content': 'Sadržaj', 'Content Management': 'Upravljanje sadržajem', 'Content Management System': 'Sistem za upravljanje sadržajem', 'Contents': 'Sadržaj', 'Context': 'Kontekst', 'Contingency planning': 'Planiranje za vanredne slučajeve', 'Contract End Date': 'Krajnji datum ugovora', 'Contradictory values!': 'Protivriječne vrednosti!', 'Contributor': 'Saradnik', 'Controller': 'Kontroler', 'Controller name': 'Ime kontrolera', 'Controller tour is activated': 'Tura kontrolera je aktivirana', 'Conversion Tool': 'Sredstvo konverzije', 'convex': 'konveksni', 'Cook Islands': 'Kukova Ostrva', 'Cooking NFIs': 'Neprehrambeni artikli za kuhanje', 'Cooking Oil': 'Jestivo ulje', 'Coordinate Conversion': 'Pretvaranje Koordinata', 'Coordinate Layer': 'Sloj koordinata', 'Coping Activities': 'Aktivnosti suočavanja', 'Copy': 'Kopiraj', 'Corn': 'Kukuruz', 'Corporate Entity': 'Poslovna jedinica', 'Cost per Megabyte': 'Cijena po megabajtu', 'Cost per Minute': 'Trošak po minutu', 'Cost Type': 'Vrsta troška', 'Costa Rica': 'Kostarika', 'Could not add person record': 'Ne mogu dodati zapis o osobi', 'Could not auto-register at the repository, please register manually.': 'Ne mogu automatski registrovati na repozitoriju, molim registrujte ručno.', 'Could not create record.': 'Ne može se kreirati zapis', 'Could not initiate manual synchronization.': 'Ne mogu pokrenuti ručnu sinhronizaciju', 'Could not merge records. (Internal Error: %s)': 'Ne mogu spojiti slogove. 
(Interna greška: %s)', "couldn't be parsed so NetworkLinks not followed.": 'nije mogao biti analiziran pa se Mrežni linkovi ne prate', "Couldn't open %s!": 'Ne mogu otvoriti %s!', 'Counselling': 'Savjet', 'Count': 'Broj', 'Count of Question': 'Broj pitanja', 'Country': 'Država', 'Country Code': 'Kôd države', 'Country is required!': 'Zahtijevana država', 'Country of Residence': 'Država prebivališta', 'County': 'Pokrajina', 'County / District': 'Kanton / Regija', 'Course': 'Kurs', 'Course added': 'Dodan kurs', 'Course Catalog': 'Katalog kurseva', 'Course Certicate added': 'Dodat certifikat kursa', 'Course Certicate deleted': 'Certifikat kursa izbrisan', 'Course Certicate Details': 'Detalji certifikata kursa', 'Course Certicate updated': 'Potvrda o kursu ažurirana', 'Course Certicates': 'Certifikati kurseva', 'Course Certificate added': 'Dodat certifikat kursa', 'Course Certificate deleted': 'Obrisan certifikat kursa', 'Course Certificate Details': 'Detalji certifikata kursa', 'Course Certificate updated': 'Ažuriran certifikat kursa', 'Course Certificates': 'Certifikati kursa', 'Course deleted': 'Obrisan kurs', 'Course Details': 'Detalji kursa', 'Course updated': 'Ažuriran kurs', 'Courses': 'Kursevi', 'covered': 'prekriveno', 'Create': 'Kreiraj', 'Create & manage Distribution groups to receive Alerts': 'Kreiraj & upravljaj grupama distibucije za primanje znakova za uzbunu', "Create 'More Info'": 'Kreiraj dodatne podatke', 'Create a group entry in the registry.': 'Kreiraj unosenje grupe u registar.', 'Create a new facility or ensure that you have permissions for an existing facility.': 'Kreirajte novi objekat ili osigurajte da imate potrebna prava nad postojećim objektom.', 'Create a new Group.': 'Kreiraj novu grupu.', 'Create a new organization or ensure that you have permissions for an existing organization.': 'Kreirajte novu organizaciju ili osigurajte da imate potrebna prava nad postojećom organizacijom.', 'Create a new Team.': 'Kreiraj novi Tim', 'Create a 
Person': 'Kreiraj osobu', 'Create Activity': 'Kreiraj aktivnost', 'Create Activity Report': 'Kreiraj izvještaja o aktivnostima', 'Create Activity Type': 'Kreiraj tip aktivnosti', 'Create Airport': 'Kreiraj aerodrom', 'Create Alternative Item': 'Kreiraj alternativnu stavku', 'Create an Assessment Question': 'Kreiraj pitanje ocjene', 'Create Assessment Answer': 'Kreiraj odgovor ocjene', 'Create Assessment Template': 'Kreiraj predložak ocjene', 'Create Assessment': 'Kreiraj ocjene', 'Create Asset': 'Kreiraj sredstvo', 'Create Award': 'Kreiraj nagradu', 'Create Base Station': 'Kreiraj baznu stanicu', 'Create Bed Type': 'Kreiraj vrstu ležaja', 'Create Beneficiary Type': 'Kreiraj tip korisnika', 'Create Brand': 'Kreiraj proizvođačku marku', 'Create Campaign': 'Kreiraj kampanju', 'Create Case': 'Kreiraj slučaj', 'Create Catalog': 'Kreiraj katalog', 'Create Catalog Item': 'Kreiraj stavku kataloga', 'Create Certificate': 'Kreiraj certifikat', 'Create Checklist': 'Kreiraj listu zadataka', 'Create Cholera Treatment Capability Information': 'Kreiraj informacije o sposobnosti liječenja kolere', 'Create Cluster': 'Kreiraj grupisanje', 'Create Coalition': 'Kreiraj koaliciju', 'Create Community': 'Kreiraj Zajednicu', 'Create Competency Rating': 'Kreiraj status spremnosti', 'Create Contact': 'Kreiraj kontakt', 'Create Course': 'Kreiraj kurs', 'Create Dead Body Report': 'Kreiraj izvještaj o mrtvim tijelima', 'Create Department': 'Kreiraj odjeljenje', 'Create Details': 'Kreiraj detalje', 'Create Event': 'Kreiraj događaj', 'Create Event Type': 'Kreiraj tip događaja', 'Create Facility': 'Kreiraj objekat', 'Create Facility Type': 'Kreiraj vrstu objekta', 'Create Feature Layer': 'Kreiraj sloj karakteristika', 'Create GPS data': 'Kreiraj GPS podatke', 'Create Group': 'Kreiraj grupu', 'Create Group Entry': 'Kreiraj element grupe', 'Create Hazard': 'Kreiraj rizik', 'Create Heliport': 'Kreiraj heliodrom', 'Create Hospital': 'Kreiraj bolnicu', 'Create Identification Report': 'Kreiraj 
izvještaj o identifikacijama', 'Create Impact Assessment': 'Kreiraj procjenu utjecaja', 'Create Incident': 'Kreiraj incident', 'Create Incident Report': 'Kreiraj izvještaj o incidentu', 'Create Incident Type': 'Kreiraj tip incidenta', 'Create Item': 'Kreiraj stavku', 'Create Item Category': 'Kreiraj kategoriju stavke', 'Create Item Pack': 'Kreiraj paket stavki', 'Create Job': 'Kreiraj posao', 'Create Job Title': 'Kreiraj radno mjesto', 'Create Kit': 'Kreiraj komplet', 'Create Layer': 'Kreiraj sloj', 'Create Location': 'Kreiraj lokaciju', 'Create Location Hierarchy': 'Kreiraj hijerarhiju lokacija', 'Create Mailing List': 'Kreiraj listu za slanje poruka', 'Create Map Profile': 'Kreiraj konfiguraciju mape', 'Create Marker': 'Kreiraj marker', 'Create Member': 'Kreiraj člana', 'Create Milestone': 'Kreiraj prekretnicu', 'Create Mobile Impact Assessment': 'Kreiraj mobilnu procjenu utjecaja', 'Create Morgue': 'Kreiraj mrtvačnicu', 'Create Network': 'Kreiraj mrežu', 'Create New Asset': 'Kreiraj novo sredstvo', 'Create New Catalog Item': 'Kreiraj novu stavku kataloga', 'Create New Event': 'Napravi novi događaj', 'Create New Item Category': 'Kreiraj novu kategoriju stavke', 'Create new Office': 'Kreiraj novi ured', 'Create new Organization': 'Napravi novu organizaciju', 'Create New Request': 'Kreiraj novi zahtjev', 'Create New Scenario': 'Kreiranje novog scenarija', 'Create New Vehicle': 'Kreiraj novo vozilo', 'Create Office': 'Kreiraj kancelariju', 'Create Office Type': 'Kreiraj tip kancelarije', 'Create Organization': 'Kreiraj organizaciju', 'Create Organization Type': 'Kreiraj tip organizacije', 'Create Partner Organization': 'Kreiraj partnersku organizaciju', 'Create Personal Effects': 'Kreiraj lične uticaja', 'Create PoI Type': 'Kreiraj tačku interesa', 'Create Point of Interest': 'Kreiraj tačku interesa', 'Create Policy or Strategy': 'Kreiraj politiku ili strategiju', 'Create Post': 'Kreiraj blok ugradivog teksta', 'Create Program': 'Kreiraj program', 'Create Project': 
'Kreiraj projekat', 'Create Projection': 'Kreiraj projekciju', 'Create Question Meta-Data': 'Kreiraj metapodatke pitanja', 'Create Rapid Assessment': 'Napravi brzu procjenu', 'Create Report': 'Kreiraj izvještaj', 'Create Repository': 'Kreiraj repozitorij', 'Create Request': 'Kreiraj zahtjev', 'Create Request Template': 'Kreiraj predložak zahtjeva', 'Create Resource': 'Kreiraj resurs', 'Create Resource Type': 'Kreiraj tip resursa', 'Create River': 'Kreiraj rijeku', 'Create Role': 'Kreiraj ulogu', 'Create Room': 'Kreiraj sobu', 'Create Seaport': 'Kreiraj luku', 'Create search': 'Kreiraj pretragu', 'Create Sector': 'Kreiraj sektor', 'Create Series': 'Kreiraj seriju', 'Create Service': 'Kreiraj uslugu', 'Create Service Profile': 'Kreiraj profil usluge', 'Create Shelter': 'Kreiraj sklonište', 'Create Shelter Service': 'Kreiraj uslugu skloništa', 'Create Shelter Status': 'Kreiraj status skloništa', 'Create Shelter Type': 'Kreiraj tip skloništa', 'Create Skill': 'Kreiraj vještinu', 'Create Skill Type': 'Kreiraj tip vještine', 'Create Staff Member': 'Kreiraj člana osoblja', 'Create Status': 'Kreiraj status', 'Create Status Report': 'Kreiraj statusni izvještaj', 'Create Supplier': 'Kreiraj dobavljača', 'Create Symbology': 'Kreiraj značenje simbola', 'Create Tag': 'Kreiraj oznaku', 'Create Task': 'Kreiraj zadatak', 'Create Team': 'Kreiraj tim', 'Create Template Section': 'Kreiraj odjeljak predloška', 'Create Theme': 'Kreiraj temu', 'Create Tour': 'Kreiraj turu', 'Create Training Event': 'Kreiraj događaj obuke', 'Create User': 'Kreiraj korisnika', 'Create Vehicle': 'Kreiraj vozilo', 'Create Vehicle Detail': 'Kreiraj detalje o vozilu', 'Create Volunteer': 'Kreiraj volontera', 'Create Volunteer Cluster': 'Kreiraj skup volontera', 'Create Volunteer Cluster Position': 'Kreiraj poziciju skupa volontera', 'Create Volunteer Cluster Type': 'Kreiraj tip skup volontera', 'Create Volunteer Role': 'Kreiraj ulogu volontera', 'Create Warehouse': 'Kreiraj skladište', 'Create, enter, and 
manage surveys.': 'Kreiraj, pristupi i upravljaj anketama.', 'created': 'kreirano', 'Created By': 'Kreirao', 'Created on %s': 'Kreirano %s', 'Created on %s by %s': 'Kreirano dana %s od strane %s', 'Creation of assessments': 'Kreiranje procjena', 'Creation of Surveys': 'Kreiranje anketa', 'Credential': 'Akreditiv', 'Credential added': 'Akreditiv dodan', 'Credential deleted': 'Akreditiv obrisan', 'Credential Details': 'Detalji o akreditivima', 'Credential updated': 'Akreditiv ažuriran', 'Credentialling Organization': 'Akreditirajuća organizacija', 'Credentials': 'Akreditivi', 'Credit Card': 'Kreditna kartica', 'Crime': 'Zločin', 'criminal intent': 'Namjera zločina', 'Criteria': 'Kriteriji', 'critical': 'kritično', 'Croatia': 'Hrvatska', 'Crop Image': 'Sasijeci sliku', 'cross-eyed': 'razrok', 'CSS file %s not writable - unable to apply theme!': 'CSS datoteka %s nemoguća za pisati - nije moguće promijeniti temu!', 'CSV file required': 'CSV datoteka je potrebna', 'Cuba': 'Kuba', 'curly': 'kovrčavo', 'Currency': 'Valuta', 'current': 'tekuće', 'Current': 'Tekući', 'Current community priorities': 'Trenutni prioriteti zajednice', 'Current Entries': 'Trenutni elementi', 'Current general needs': 'Trenutne generalne potrebe', 'Current greatest needs of vulnerable groups': 'Trenutno najveće potrebe pogođenih grupa', 'Current Group Members': 'Trenutni članovi grupe', 'Current Group Memberships': 'Trenutni članovi grupe', 'Current health problems': 'Trenutni zdravstveni problemi', 'Current Home Address': 'Trenutna kućna adresa', 'Current Identities': 'Trenutni identiteti', 'Current Location': 'Trenutna lokacija', 'Current Location Country': 'Zemlja trenutne lokacije', 'Current Location Phone Number': 'Broj telefona na trenutnoj lokaciji', 'Current Location Treating Hospital': 'Bolnica za tretman na trenutnoj lokaciji', 'Current Log Entries': 'Trenutne stavke zapisnika', 'Current main income sources': 'Trenutni glavni izvori prihoda', 'Current major expenses': 'Trrenutni veći 
troškovi', 'Current Memberships': 'Trenutno članstvo', 'Current Mileage': 'Trenutna kilometraža', 'Current Notes': 'Trenutne napomene', 'Current number of patients': 'Trenutni broj pacijenata', 'Current Owned By (Organization/Branch)': 'Trenutni vlasnik (organizacija/grana)', 'Current problems, categories': 'Trenutni problemi, kategorije', 'Current problems, details': 'Tekući problemi, pojedinosti', 'Current Records': 'Trenutni zapisi', 'Current Registrations': 'Trenutna Registracija', 'Current request': 'Trenutni zahtjev', 'Current response': 'trenutni odziv', 'Current session': 'Trenutna sesija', 'Current staffing level at the facility.': 'Trenutni nivo osoblja na objektu.', 'Current Status': 'Trenutni status', 'Current Team Members': 'Trenutni članovi tima', 'Current Twitter account': 'Trenutni twitter profil.', 'Current type of health problems, adults': 'Trenutna vrsta zdravstvenih problema odraslih', 'Current type of health problems, children': 'Drugi tip zdravstvenih problema, djeca', 'Current type of source for drinking water': 'Trenutni tip izvora pitke vode', 'Current type of source for sanitary water': 'Trenutni tip izvora sanitarne vode', 'Currently no Appraisals entered': 'Nema trenutno unesenih procjena ponuda', 'Currently no Certifications registered': 'Nema trenutno registrovanih potvrda', 'Currently no Competencies registered': 'Trenutno nema registrovanih kompetencija', 'Currently no Course Certicates registered': 'Trenutno nisu registrovani certifikati kurseva', 'Currently no Course Certificates registered': 'Trenutno nisu registrovani certifikati kursa', 'Currently no Credentials registered': 'Trenutno nema registriranih akreditiva', 'Currently no entries in the catalog': 'Trenutno nema unosa u katalog', 'Currently no hours recorded for this volunteer': 'Trenutno nema zabilježenih sati za ovog volontera', 'Currently no Missions registered': 'Trenutno nema registrovanih misija', 'Currently no Participants registered': 'Trenutno nema registrovanih 
učesnika', 'Currently no Professional Experience entered': 'Trenutno nije uneseno profesionalno iskustvo', 'Currently no programs registered': 'Trenutno nema registrovanih programa', 'Currently no Skill Equivalences registered': 'Trenutno nije zabilježena ekvivalencija vještina', 'Currently no Skills registered': 'Trenutno nema registriranih vještina', 'Currently no staff assigned': 'Trenutno nema dodijeljenog osoblja', 'Currently no training events registered': 'Trenutno nema događaja obuke registrovanih', 'Currently no Trainings registered': 'Trenutno nema registrovanih treninga', 'currently registered': 'Trenutno registrirani', 'Currently your system has default username and password. Username and Password are required by foriegn machines to sync data with your computer. You may set a username and password so that only those machines can fetch and submit data to your machines which your grant access by sharing your password.': 'Trenutno vaš sistem ima podrazumijevano korisničko ime i lozinku. Korisničko ime i lozinka su potrebni foriegn mašine za sinhronizaciju podataka s računalom. 
Možete postaviti korisničko ime i lozinku, tako da samo one mašine mogu dohvatiti i dostaviti podatke vaše mašine kojima ste dodijelili pristup dijeleći svoju lozinku.', 'Customs Capacity': 'Kapacitet carine', 'Customs Warehousing Storage Capacity': 'Kapacitet carinskog skladišta', 'Cyprus': 'Kipar', 'Czech Republic': 'Češka Republika', "Côte d'Ivoire": 'Obala Slonovače', 'Daily': 'Dnevno', 'daily': 'dnevno', 'Daily Work': 'Dnevni rad', 'Dam Overflow': 'Preliv Brane', 'Damage': 'Šteta', 'Damage Assessment': 'Procjena štete', 'Damage sustained': 'Pretrpljena šteta', 'Damaged': 'Oštećeno', 'Dangerous Person': 'Opasna osoba', 'dark': 'tamno', 'Dashboard': 'Kontrolna ploča', 'Data': 'Podatak', 'Data added to Theme Layer': 'Podaci dodani na tematski sloj', 'Data import policy': 'Politika uvoza podataka', 'Data not available': 'Podaci nije dostupni', 'Data Type': 'Tip podataka', 'data uploaded': 'podaci poslani', 'Data uploaded': 'Podaci preneseni', 'database': 'baza podataka', 'Database': 'Baza podataka', 'database %s select': 'baza podataka %s selektovana', 'Database %s select': 'baza podataka %s selektovana', 'DataTable ID': 'ID tabele podataka', 'DataTable row': 'Red tabele podataka', 'Date': 'Datum', 'Date & Time': 'Datum i vrijeme', 'Date and Time': 'Datum i vrijeme', 'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Datum i vrijeme prijema robe. 
Normalno je ovdje prikazano trenutno vrijeme, ali se može izmijeniti u padajućoj listi.', 'Date and time this report relates to.': 'datum i vrijeme koje se odnose na ovaj izvještaj', 'Date Avaialble': 'Datum dostupan', 'Date Available': 'Datum dostupnosti', 'Date Created': 'Datum kreiranja', 'Date Due': 'Krajnji rok', 'Date Expected': 'Očekivan datum plaćanja', 'Date Modified': 'Datum izmjene', 'Date must be %(max)s or earlier!': 'Datum mora biti %(max)s ili raniji!', 'Date must be %(min)s or later!': 'Datum mora biti %(min)s ili kasniji!', 'Date must be between %(min)s and %(max)s!': 'Datum mora biti između %(min)s i %(max)s!', 'Date Needed By': 'Datum kada je potrebno', 'Date of Birth': 'Datum rođenja', 'Date of Latest Information on Beneficiaries Reached': 'Datum najnovijih informacija o korsnicima dostignut.', 'Date of Recovery': 'Datum pronalaska', 'Date of Report': 'Datum podnošenja izvještaja', 'Date of Treatment': 'Datum tretmana', 'Date Printed': 'Datum štampe', 'Date Published': 'Datum objavljivanja', 'Date Question': 'Datum pitanja', 'Date Received': 'Datum prijema', 'Date Released': 'Datum izlaza', 'Date Repacked': 'Datum ponovnog pakovanja', 'Date Requested': 'Trazeni datum', 'Date Required': 'Neophodan datum', 'Date Required Until': 'Datum potreban do', 'Date Sent': 'Datum slanja', 'Date Taken': 'Datum preuzimanja', 'Date Until': 'Datum do', 'Date/Time': 'Datum/Vrijeme', 'Date/Time of Alert': 'Vrijeme i datum uzbune', 'Date/Time of Dispatch': 'Vrijeme i datum raspodjele', 'Date/Time of Find': 'Datum/Vrijeme pretrage', 'Date/Time when found': 'Datum/Vrijeme kada je pronađeno', 'Date/Time when last seen': 'Dan/Vrijeme posljednjeg viđenja', 'Day': 'Dan', 'db': 'baza podataka', 'DC': 'DC', 'De-duplicate': 'Ukloni duplikat', 'De-duplicate Records': 'Ukloni duple slogove', 'De-duplicator': 'De-duplicator(Ukloni duple)', 'Dead Bodies': 'Mrtva Tijela', 'Dead Body': 'Leš', 'Dead Body Details': 'Detalji o mrtvim tijelima', 'Dead body report added': 'Dodat 
izvještaj o mrtvom tijelu', 'Dead body report deleted': 'Obrisan izvještaj o mrtvom tijelu', 'Dead body report updated': 'Ažuriran izvještaj o mrtvom tijelu', 'Dead Body Reports': 'Izvještaj o mrtvim tijelima', 'Deaths in the past 24h': 'Broj smrtnih slučajeva u protekla 24 sata', 'Deaths/24hrs': 'Smrtnost/24h', 'Debug': 'Praćenje grešaka', 'deceased': 'preminuo', 'Deceased': 'Preminuo', 'Decimal Degrees': 'Decimalni stepeni', 'DECISION': 'ODLUKA', 'Decision': 'Odluka', 'Decomposed': 'Raspadnuto', 'deep': 'duboko', 'Default': 'Zadano', 'Default Base layer?': 'Podrazumijevani bazni sloj', 'Default Height of the map window.': 'Početna visina prozora mape.', 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Podrazumijevana visina prozora mape. U rasporedu prozora karta se maksimizira da popuni prozor , nema potrebe da se ovdje postavlja velika vrijednost.', 'Default Location': 'Podrazumijevana lokacija', 'Default Map': 'Osnovna karta', 'Default map question': 'Podrazumijevano pitanje mape', 'Default Marker': 'Zadani Marker', 'Default Realm': 'Zadano carstvo', 'Default Realm = All Entities the User is a Staff Member of': 'Podrazumijevano carstvo = Sve jedinke čiji je korisnik uposlenik', 'Default synchronization policy': 'Uobičajena polica sinhronizacije', 'Default Width of the map window.': 'Početna vrijednost širine prozora mape.', 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Podrazumijevana širina prozora mape. 
U rasporedu prozora karta se maksimizira da popuni prozor , nema potrebe da se ovdje postavlja velika vrijednost.', 'Default?': 'Podrazumijevano?', 'Defaults updated': 'Podrazumijevane vrijednosti ažurirane', 'Defecation area for animals': 'Područje za vršenje nužde za životinje', 'deferred': 'odgođen', 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definirajte scenarije za raspodjelu prikladnih Resursa ( Ljudi ,sredstva i objekti).', 'Defines the icon used for display of features on handheld GPS.': 'Definiše ikonu korištenu za prikaz karakteristika na ručnom GPS uređaju.', 'Defines the icon used for display of features on interactive map & KML exports.': 'Definira ikonu korištenu za prikaz karakteristika na interaktivnoj mapi i KML exportima.', 'Defines the marker used for display & the attributes visible in the popup.': 'Definira marker korišten za prikaz i atribute vidljive u prozoru.', 'Degrees in a latitude must be between -90 to 90.': 'Stepeni u geografskoj dužini moraju biti između -90 to 90.', 'Degrees in a longitude must be between -180 to 180.': 'Stepeni u geografskoj širini moraju biti između -180 to 180.', 'Degrees must be a number between -180 and 180': 'Stepeni moraju biti broj između -180 i 180', 'Degrees must be a number.': 'Stepen mora biti broj', 'Dehydration': 'Dehidracija', 'Delete': 'Brisanje', 'delete': 'brisanje', 'Delete Affiliation': 'Obriši namještenje', 'Delete Aid Request': 'Obriši zahtjev za pomoć', 'Delete Airport': 'Obriši aerodrom', 'delete all checked': 'Izbriši sve provjerene', 'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'Obriši sve podatke ovog tipa za koje korisnik ima odobrenje prije postavljanja na server. 
Ovo je dizajnirano za radne tokove gdje se podaci ažuriraju na vanmrežnim tablicama i šalju samo za čitanje', 'Delete Alternative Item': 'Obriši alternativnu stavku', 'Delete Appraisal': 'Obriši ispunjenje', 'Delete Assessment': 'Brisanje Procjene', 'Delete Assessment Summary': 'Obriši sažetak procjene', 'Delete Asset': 'Obriši sredstvo', 'Delete Asset Log Entry': 'Obriši unosa u zapisniku sredstava', 'Delete Award': 'Obriši nagradu', 'Delete Base Station': 'Obriši baznu stanicu', 'Delete Baseline': 'Izbriši referentnu tačku', 'Delete Baseline Type': 'Obrišite tip referentne tačke', 'Delete Branch': 'Obriši ogranak', 'Delete Brand': 'Obriši proizvođačku marku', 'Delete Budget': 'Obriši budžet', 'Delete Bundle': 'Izbriši paket', 'Delete Case': 'Obriši slučaj', 'Delete Catalog': 'Obriši katalog', 'Delete Catalog Item': 'Obriši stavku kataloga', 'Delete Certificate': 'Obriši certifikat', 'Delete Certification': 'Obriši certifikat', 'Delete Cluster': 'Obriši grupisanje', 'Delete Cluster Subsector': 'Obriši podsektor skupa', 'Delete Commitment': 'Obriši Zajednicu', 'Delete Commitment Item': 'Obriši stavku obaveze', 'Delete Competency': 'Izbriši stručnost', 'Delete Competency Rating': 'Obriši status spremnosti', 'Delete Contact': 'Obriši kontakt', 'Delete Contact Information': 'Obriši informacije o kontaktu', 'Delete Course': 'Obriši kurs', 'Delete Course Certicate': 'Obriši certifikat kursa', 'Delete Course Certificate': 'Obriši certifikat kursa', 'Delete Credential': 'Obriši akreditiv', 'Delete Data from Theme layer': 'Obriši podatke iz tematskog sloja', 'Delete Department': 'Obriši odjeljenje', 'Delete Detail': 'Obriši detalje', 'Delete Distribution': 'Izbriši raspodjelu', 'Delete Distribution Item': 'Obriši distribucijsku stavku', 'Delete Document': 'Obriši dokument', 'Delete Donation': 'Obriši donaciju', 'Delete Donor': 'Obriši donatora', 'Delete Email': 'Obriši e-pošteu', 'Delete Entry': 'Obriši unos', 'Delete Event': 'Obriši događaj', 'Delete Event Type': 'Obriši 
tip događaja', 'Delete Facility': 'Obriši objekat', 'Delete Facility Type': 'Obriši vrstu objekta', 'Delete Feature Class': 'Brisanje klasa karakteristika', 'Delete Feature Layer': 'Obriši sloj karakteristika', 'Delete Find Report': 'Obriši traženi izvještaj', 'Delete from Server?': 'Izbrisati sa servera?', 'Delete GPS data': 'Obriši GPS podatke', 'Delete Group': 'Obriši grupu', 'Delete Hazard': 'Obriši rizik', 'Delete Heliport': 'Obriši heliodrom', 'Delete Home': 'Izbriši dom', 'Delete Hospital': 'Obriši bolnicu', 'Delete Hours': 'Obriši sate', 'Delete Image': 'Obriši sliku', 'Delete Impact': 'Obriši utjeicaj', 'Delete Impact Type': 'Izbriši tip utjecaja', 'Delete Incident Report': 'Obriši izvještaj o incidentu', 'Delete Inventory Item': 'Brisanje artikla u skladištu', 'Delete Item': 'Obriši stavku', 'Delete Item Category': 'Obriši kategoriju stavke', 'Delete Item from Request': 'Obriši stavku iz zahtjeva', 'Delete Item Pack': 'Obriši paket stavki', 'Delete Item Packet': 'Obriši paket stavki', 'Delete Job Role': 'Izbriši ulogu posla', 'Delete Job Title': 'Obriši radno mjesto', 'Delete Key': 'Obriši ključ', 'Delete Kit': 'Obriši komplet', 'Delete Layer': 'Obriši sloj', 'Delete Level 1 Assessment': 'Izbriši procjenu nivoa 1', 'Delete Level 2 Assessment': 'Obriši procjenu nivoa 2', 'Delete Location': 'Obriši lokaciju', 'Delete Location Hierarchy': 'Obriši hijerarhiju lokacija', 'Delete Mailing List': 'Obriši listu za slanje poruka', 'Delete Map Profile': 'Obriši konfiguraciju mape', 'Delete Marker': 'Obriši marker', 'Delete Membership': 'Obriši članstvo', 'Delete Message': 'Obriši poruku', 'Delete Mission': 'Brisanje misije', 'Delete Morgue': 'Obriši mrtvačnicu', 'Delete Need': 'Obriši potrebu', 'Delete Need Type': 'Obriši tip potrebe', 'Delete Office': 'Obriši kancelariju', 'Delete Office Type': 'Obriši tip kancelarije', 'Delete Order': 'Obriši narudžbu', 'Delete Organization': 'Obriši organizaciju', 'Delete Organization Domain': 'Obriši domenu organizacije', 
'Delete Organization Needs': 'Obriši potrebe organizacije', 'Delete Organization Type': 'Obriši tip organizacije', 'Delete Participant': 'Obriši učesnika', 'Delete Partner Organization': 'Obriši partnersku organizaciju', 'Delete Patient': 'Obriši pacijenta', 'Delete Peer': 'Obriši saradnika', 'Delete Person': 'Obriši osobu', 'Delete Photo': 'Obriši fotografiju', 'Delete PoI Type': 'Obriši tačku interesa', 'Delete Point of Interest': 'Obriši tačku interesa', 'Delete Population Statistic': 'Obriši statistiku o populaciji', 'Delete Position': 'Obriši poziciju', 'Delete Post': 'Obriši blok ugradivog teksta', 'Delete Professional Experience': 'Obriši profesionalno iskustvo', 'Delete Program': 'Obriši program', 'Delete Project': 'Obriši projekat', 'Delete Projection': 'Obriši projekciju', 'Delete Rapid Assessment': 'Izbriši brzu procjenu', 'Delete Received Item': 'Izbriši primljenu stavku', 'Delete Received Shipment': 'Obriši primljenu pošiljku', 'Delete Record': 'Obriši zapis', 'Delete Recovery Report': 'Obriši izvještaj o pronalaženju', 'Delete Region': 'Obriši područje', 'Delete Relative': 'Obriši srodnika', 'Delete Report': 'Obriši izvještaj', 'Delete Request': 'Obriši zahtjev', 'Delete Request Item': 'Izbiši stavku zahtjeva', 'Delete Request Template': 'Obriši predložak zahtjeva', 'Delete Resource': 'Obriši resurs', 'Delete Resource Type': 'Obriši tip resursa', 'Delete Role': 'Obriši ulogu', 'Delete Room': 'Obriši sobu', 'Delete saved search': 'Obriši snimljenu pretragu', 'Delete Scenario': 'Obriši scenarij', 'Delete Seaport': 'Obriši luku', 'Delete Section': 'Obriši sekciju', 'Delete Sector': 'Obriši sektor', 'Delete Sent Item': 'Izbriši poslani predmet', 'Delete Sent Shipment': 'Obriši poslanu pošiljku', 'Delete Service': 'Obriši uslugu', 'Delete Service Profile': 'Obriši profil usluge', 'Delete Setting': 'Uklonite postavke', 'Delete Shipment Item': 'Obriši predmet pošiljke', 'Delete Site Needs': 'Obriši potrebe mjesta', 'Delete Skill': 'Obriši vještinu', 'Delete 
Skill Equivalence': 'Obriši ekvivalenciju vještine', 'Delete Skill Provision': 'Obriši pružanje vještina', 'Delete Skill Type': 'Obriši tip vještine', 'Delete SMS': 'Obriši SMS', 'Delete Staff Assignment': 'Obriši dodjelu osoblja', 'Delete Staff Member': 'Obriši člana osoblja', 'Delete Staff Type': 'Izbriši tip osoblja', 'Delete Status': 'Obriši status', 'Delete Stock Adjustment': 'Obriši prilagođenje zalihe', 'Delete Stock Count': 'Obriši broj zaliha', 'Delete Subscription': 'Izbriši pretplatu', 'Delete Subsector': 'Izbriši podsektor', 'Delete Supplier': 'Obriši dobavljača', 'Delete Survey Answer': 'Izbriši anketni odgovor', 'Delete Survey Question': 'Izbriši anketno pitanje', 'Delete Survey Section': 'Obriši anketnu sekciju', 'Delete Survey Series': 'Izbriši niz pregleda', 'Delete Survey Template': 'Obrišite šablon ankete', 'Delete Symbology': 'Obriši značenje simbola', 'Delete Theme': 'Obriši temu', 'Delete this Assessment Answer': 'Obriši ovaj odgovor ocjene', 'Delete this Assessment Question': 'Obriši ovo pitanje ocjene', 'Delete this Assessment Template': 'Obriši ovaj predložak ocjene', 'Delete this Completed Assessment Form': 'Obriši ovaj formular za završenu procjenu', 'Delete this Disaster Assessment': 'Obriši ovu procjenu katastrofe', 'Delete this Filter': 'Obriši filter', 'Delete this Question Meta-Data': 'Obriši ove metapodatke pitanja', 'Delete this Template Section': 'Obriši ovaj odjeljak predloška', 'Delete Tour': 'Obriši turu', 'Delete Training': 'Obriši obuku', 'Delete Training Event': 'Obriši događaj obuke', 'Delete Tweet': 'Obriši tweet', 'Delete Unit': 'Obriši jedinicu', 'Delete User': 'Obriši korisnika', 'Delete Vehicle': 'Obriši vozilo', 'Delete Vehicle Details': 'Obriši detalje o vozilu', 'Delete Vehicle Type': 'Obriši vrstu vozila', 'Delete Volunteer': 'Obriši volontera', 'Delete Volunteer Cluster': 'Obriši skup volontera', 'Delete Volunteer Cluster Position': 'Obriši poziciju skupa volontera', 'Delete Volunteer Cluster Type': 'Obriši tip 
skup volontera', 'Delete Volunteer Role': 'Obriši ulogu volontera', 'Delete Warehouse': 'Obriši skladište', 'Delete Warehouse Item': 'Obriši stavku skladišta', 'Delete:': 'Obriši:', 'deleted': 'obrisano', 'Deliver To': 'Isporuka za', 'Delivered By': 'Isporučio', 'Delivered To': 'Isporučeno na', 'Delphi Decision Maker': 'Delphi stvaralac odluka', 'Delphi toma de decisiones': 'Delphi stvaralac odluka', 'Demographic': 'Demografski', 'Demographics': 'Demografija', 'Demonstrations': 'Demonstracije', 'denied': 'odbijeno', 'Dental Examination': 'Pregled zuba', 'Dental Profile': 'Zubni Profil', 'Department / Unit': 'Odjel/Jedinica', 'Department added': 'Odjel dodan', 'Department Catalog': 'Katalog odjeljenja', 'Department deleted': 'Odjel obrisan', 'Department Details': 'Detalji odjeljenja', 'Department updated': 'Odjel ažuriran', 'Deployed': 'Dodijeljeno', 'Deployment': 'Isporuka', 'Deployment Alert': 'Upozorenje o dostavi', 'Deployment Location': 'Lokacija Razvrstavanja', 'Deployment Request': 'Zahtjev za dostavu', 'Describe the condition of the roads from/to the facility.': 'Opišite stanje puteva od/do ovog objekta', 'Describe the condition of the roads to your hospital.': 'Opišite stanje ceste prema Vašoj bolnici.', "Describe the procedure which this record relates to (e.g. 'medical examination')": "Opisati proceduru na koju se odnosi ovaj zapis (npr. 
'medicinsko ispitivanje')", 'Description': 'Opis', 'description': 'opis', 'Description of Bin Type': 'Opis korpe za smještaj', 'Description of Contacts': 'Opis kontakta', 'Description of defecation area': 'Opis područja za vršenje nužde', 'Description of drinking water source': 'Opis izvora pitke vode', 'Description of perimeter fencing, security guards, security lighting.': 'Opis veličine ograde, stražara, sigurnosnih svjetala.', 'Description of sanitary water source': 'Opis sanitarnih izvora vode', 'Description of water source before the disaster': 'Opis vodenih izvora prije katastrofe', 'Description:': 'Opis:', 'Descriptive Text (e.g., Prose, etc)': 'Opisni tekst', 'design': 'dizajn', 'Designated for': 'Dizajnirano za', 'Desire to remain with family': 'Želja da se ostane sa porodicom', 'Destination': 'Odredište', 'Destroyed': 'Uništen', 'Detail': 'Detalji', 'Detail added': 'Detalj dodan', 'Detail deleted': 'Detalj obrisan', 'Detail updated': 'Detalj ažuriran', 'Detailed Description/URL': 'Detaljan opis /URL', 'Details': 'Detalji', 'Details field is required!': 'Polje detalji je obavezno', 'Details of Disaster Assessment': 'Detalji procjene katastrofe', 'Details of each question in the Template': 'Detalji svakog pitanja u predlošku', 'Dialysis': 'Dijaliza', 'Diaphragms, horizontal bracing': 'Diafragma, horizontalno učvršćenje', 'Diarrhea': 'Proljev', 'Diarrhea among children under 5': 'Dijareja među djecom mlađom od 5 godina', 'Dignitary Visit': 'Posjeta funkcionera', 'Direction': 'Smijer', 'Disabilities': 'Invaliditeti', 'Disable': 'Onemogući', 'Disabled': 'Onemogućeno', 'Disabled participating in coping activities': 'Učestvovanje osoba sa posebnim potrebama u aktivnostima za suočavanje sa stresom', 'Disabled?': 'Osoba sa invaliditetom?', 'Disaster': 'Katastrofa', 'Disaster Assessment added': 'Procjena katastrofe dodana', 'Disaster Assessment Chart': 'Dijagram procjene katastrofe', 'Disaster Assessment deleted': 'Procjena katastrofe obrisana', 'Disaster 
Assessment Map': 'Mapa procjene katastrofe', 'Disaster Assessment Summary': 'Rezime procjene katastrofe', 'Disaster Assessment updated': 'Procjena katastrofe ažurirana', 'Disaster Assessments': 'Procjene katastrofe', 'Disaster clean-up/repairs': 'Čišćenje/opravka od nepogode', 'Disaster Victim Identification': 'Identifikacija žrtava nesreće', 'Disaster Victim Registry': 'Registar žrtava katastrofe', 'Discharge (cusecs)': 'Pražnjenje (kubni metar po sekundi)', 'Discharges/24hrs': 'Istovari/24 sata', 'Discussion Forum': 'Forum za diskusiju', 'Discussion Forum on item': 'Forum za rasprave na određenu temu', 'Disease vectors': 'Vektori bolesti', 'diseased': 'bolesni', 'Disk Cache Keys': 'Disk cache ključevi', 'Disk Cleared': 'Disk očišćen', 'Dispatch': 'Isporuči', 'Dispatch Time': 'Vrijeme isporuke', 'Dispensary': 'Dispanzer', 'displaced': 'raseljeni', 'Displaced': 'Raseljen', 'Displaced Populations': 'Raseljeno stanovništvo', 'Display Chart': 'Prikaži dijagram', 'Display name': 'Ime za prikaz', 'Display Polygons?': 'Prikaži poligone?', 'Display Question on Map': 'Prikaži pitanje na karti.', 'Display Routes?': 'Prikazati rute?', 'Display Selected Questions': 'Prikaži izabrana pitanja', 'Display Tracks?': 'Prikaži tragove?', 'Display Waypoints?': 'Prikazati Putne tačke?', 'Dispose': 'Raspoloživ', 'Distance between defecation area and water source': 'Udaljenost između površina gdje se obavlja nužda i izvora vode', 'Distance between latrines and temporary shelter in meters': 'Udaljenost između površina gdje se obavlja nužda i privremenog skloništa', 'Distance between shelter and latrines': 'Udaljenost između skloništa i zahoda', 'Distance from %s:': 'Razdaljina od %s:', 'Distance(Kms)': 'Udaljenost(kilometri)', 'Distributed without Record': 'Raspodjeljeno bez zapisa', 'Distribution': 'Raspodjela', 'Distribution Added': 'Dodata raspodjela', 'Distribution Deleted': 'Raspodjela izbrisana', 'Distribution Details': 'Detalji raspodjele', 'Distribution Groups': 'Distribucijske 
grupe', 'Distribution groups': 'Distribucijske grupe', 'Distribution Item': 'Distribucijska stavka', 'Distribution Item Added': 'Dodata distribucijska stavka', 'Distribution Item Deleted': 'Stavka raspodjele je obrisana.', 'Distribution Item Details': 'Detalji stavke distribucije', 'Distribution Item Updated': 'Stavka raspodjele je ažurirana', 'Distribution Items': 'Raspodjela stavki', 'Distribution Report': 'Izvještaj raspodjele', 'Distribution Updated': 'Raspodjela ažurirana', 'Distributions': 'Raspodjele', 'District': 'Distrikt', 'divorced': 'razveden', 'Djibouti': 'Džibuti', 'DM Planning': 'DM planiranje', 'DNA Profile': 'DNA profil', 'DNA Profiling': 'Prikaz profila preko DNA', 'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': 'Da domaćinstva imaju odgovarajuću opremu i materijale da kuhaju svoju hranu (štednjak, lonci, tanjir, šolje / posude za piće, itd)?', 'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'Da li domaćinstva imaju odgovaraći materijal za ležaje (prekrivače, madrace)?', 'Do households have household water storage containers?': 'Da li domaćinstva imaju spremnike za vodu?', 'Do women and girls have easy access to sanitary materials?': 'Da li žene i djevojke imaju lak pristup sanitarnim materijalima?', 'Do you have access to cash to restart your business?': 'Imate li pristup novcu da ponovo započnete poslovne aktivnosti?', 'Do you know of any incidents of violence?': 'Znate li slučajeve nasilja?', 'Do you know of children living on their own (without adults)?': 'Poznajete li djecu koja žive sama (bez staratelja)?', 'Do you know of children separated from their parents or caregivers?': 'Da li poznajete djecu odvojenu od svojih roditelja/staratelja?', 'Do you know of children that have been sent to safe places?': 'Poznajete li djecu koja su poslana na sigurna mjesta?', 'Do you know of children that have disappeared 
without explanation in the period since the disaster?': 'Poznajete li djecu koja su nestala bez objašnjenja u periodu nakon katastrofe?', 'Do you know of parents/caregivers missing children?': 'Poznajete li roditelje/Staratelje djece koja su nestala?', 'Do you prefer': 'Da li više volite', 'Do you really want to approve this record?': 'Želite li zaista potvrditi ovaj zapis?', 'Do you really want to delete these records?': 'Da li zaista želite obrisati ove zapise?', 'Do you really want to delete this record? (This action can not be reversed)': 'Želite li zaista obrisati ovaj zapis? (akcija se ne može vratiti', 'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Želite li otkazati ovu primljenu pošiljku? Predmeti će biti uklonjeni iz inventara. Ova akcija NE MOŽE biti poništena!', 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Da li želite otkazati ovu posiljku? Artikal će biti vraćen u inventar. Ovo se NE MOŽE poništiti!', 'Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!': 'Da li želite otkazati ovu pošiljku? Artikli će biti vraćeni u skladište. 
Ovo se NE MOŽE poništiti!', 'Do you want to commit to this request?': 'Želite li zaista potvrditi ovaj zahtjev?', 'Do you want to complete & close this adjustment?': 'Želite li završiti i zatvoriti ovo prilagođenje?', 'Do you want to complete the return process?': 'Želite li završiti proces vraćanja.', 'Do you want to over-write the file metadata with new default values?': 'Želite li prebrisati metapodatke datoteke s novim podrazumijevanim vrijednostima?', 'Do you want to receive this shipment?': 'Da li želite primiti ovu pošiljku?', 'Do you want to send these Committed items?': 'Da li želite poslati ove izvršene stavke?', 'Do you want to send this shipment?': 'Da li želite poslati ovu pošiljku?', 'Document': 'Dokument', 'Document added': 'Dokumenti dodani', 'Document deleted': 'Dokument obrisan', 'Document Details': 'Detalji dokumenta', 'Document removed': 'Dokument uklonjen', 'Document Scan': 'Skeniranje dokumenta', 'Document updated': 'Dokumenti ažurirani', 'Document:': 'Dokument:', 'Documents': 'Dokumenti', 'Documents and Images': 'Dokumenti i slike', 'Documents and Photos': 'Dokumenti i slike', 'Does this facility provide a cholera treatment center?': 'Da li ovaj objekat pruža tretman prilikom kolere?', 'Doing nothing (no structured activity)': 'Ne radeći ništa ( nema struktuirane aktivnosti )', 'Dollars': 'Dolari', 'Domain': 'Domena', 'Domestic chores': 'Domaći poslovi', 'Dominica': 'Dominika', 'Dominican Republic': 'Dominikanska Republika', "Don't Know": 'Ne znam', 'DONATE': 'DONACIJA', 'Donate to this Request': 'Doniraj zahtjev', 'Donated': 'Donirano', 'Donating Organization': 'Organizacija donatora', 'Donation': 'Donacija', 'Donation Added': 'Donacija dodana', 'Donation Canceled': 'Donacija otkazana', 'Donation Certificate': 'Certifikat o donaciji', 'Donation Details': 'Detalji donacije', 'Donation Phone #': 'telefon za donacije #', 'Donation Updated': 'Donacija ažurirana', 'Donations': 'Donacije', 'done!': 'učinjeno!', 'Donor': 'Donator', 'Donor added': 
'Donator dodan', 'Donor deleted': 'Donator obrisan', 'Donor Details': 'Detalji Donatora', 'Donor updated': 'Donator ažuriran', 'Donors': 'donatori', 'Donors Report': 'Izvještaj davaoca', 'Doolie Transportation Ambulance': 'Doolie prevozna kola hitne pomoći', 'Door frame': 'Okvir od vrata', 'Download': 'Preuzmi', 'Download Assessment Form Document': 'Preuzmi formular procjene kao dokument', 'Download Assessment Form Spreadsheet': 'Preuzmi formular procjene kao tablicu', 'Download last build': 'Preuzmi samo posljednje kompajliranje', 'Download OCR-able PDF Form': 'Preuzmi OCR čitljiv PDF Formular', 'Download PDF': 'Preuzmite PDF', 'Download Template': 'Preuzimanje šablona', 'Draft': 'Nacrt', 'Draft Features': 'Nacrt objekata', 'Drag an image below to crop and scale it before uploading it:': 'Povucite sliku ispod da je izrežete i promijenite joj veličinu prije postavljanja.', 'Drainage': 'Drenaža', 'Draw on Map': 'Prikaži na karti', 'Drawing up a Budget for Staff & Equipment across various Locations.': 'Izrada nacrta budžeta za osoblje i opremu na različitim lokacijama.', 'Drill Down by Group': 'Dublja analiza po grupi', 'Drill Down by Incident': 'Dublja analizira po incidentu', 'Drill Down by Shelter': 'Dublja analiza po skloništu', 'Driver Phone Number': 'Telefonski broj vozača', 'Drivers': 'Drajveri', 'Driving License': 'Vozačka dozvola', 'Drop-off Location for Goods?': 'Lokacija za ostavljanje robe?', 'Drought': 'Suša', 'DRRPP Extensions': 'DRRPP proširenja', 'Drugs': 'Lijekovi', 'Dry Dock': 'Suho sidrište', 'Due %(date)s': 'Rok %(date)s', 'Dug Well': 'Iskopani bunar', 'Dump': 'Izdvajanje', 'Duplicate': 'Dupliciraj', 'duplicate': 'duplikat', 'Duplicate Locations': 'Dupliraj lokacije', 'Duplicate?': 'Napraviti kopiju?', 'Duration': 'Trajanje', 'Duration (months)': 'Trajanje (mjeseci)', 'Dust Storm': 'Prašnjava oluja', 'DVI Navigator': 'DVI Navigator', 'Dwelling': 'Stambeni', 'Dwellings': 'Stambene jedinice', 'dyed': 'umrli', 'E-mail': 'E-pošta', 'Early Recovery': 
'Rani oporavak', 'Early warning': 'Rano upozorenje', 'Ears, angle': 'Uši, ugao', 'Ears, size': 'Uši, veličina', 'Earth Enabled?': 'Zemlja uključena?', 'Earthquake': 'Zemljotres', 'East Timor': 'Istočni Timor', 'Easy access to sanitation items for women/girls': 'Lak pristup sanitarnim predmetima za žene/djevojke', 'Ecuador': 'Ekvador', 'Edit': 'Izmijeni', 'edit': 'uredi', 'Edit %(site_label)s Status': 'Uredi %(site_label)s status', 'Edit %(type)s': 'Uredi %(type)s', "Edit 'More Info'": 'Uredi dodatne podatke', 'Edit a Missing Person': 'Uredi nestalu osobu', 'Edit Activity': 'Uredi aktivnost', 'Edit Activity Organization': 'Uredi organizaciju', 'Edit Activity Type': 'Uredi tip aktivnosti', 'Edit Address': 'Uredi adresu', 'Edit Adjustment': 'Uredi podešavanja', 'Edit Affiliation': 'Uredi namještenje', 'Edit Airport': 'Uredi aerodrom', 'Edit Alternative Item': 'Uredi alternativnu stavku', 'Edit Annual Budget': 'Uredi godišnji budžet', 'Edit Application': 'Uredi aplikaciju', 'Edit Appraisal': 'Uredi ispunjenje', 'Edit Assessment': 'Uredi procjenu', 'Edit Assessment Answer': 'Uredi odgovor ocjene', 'Edit Assessment Question': 'Uredi pitanje ocjene', 'Edit Assessment Summary': 'Izmjena sažetka procjene', 'Edit Assessment Template': 'Uredi predložak ocjene', 'Edit Asset': 'Uredi sredstvo', 'Edit Asset Log Entry': 'Uredi stavku zapisnika o sredstvima', 'Edit Award': 'Uredi nagradu', 'Edit Base Station': 'Uredi baznu stanicu', 'Edit Baseline': 'Uredi referentnu tačku', 'Edit Baseline Type': 'Uredi Tip Referentne tačke', 'Edit Beneficiaries': 'Uredi korisnika', 'Edit Beneficiary Type': 'Uredi tip korisnika', 'Edit Branch Organization': 'Uredi ogranak organizacije', 'Edit Brand': 'Uredi proizvođačku marku', 'Edit Budget': 'Promjeni budžet', 'Edit Bundle': 'Promjeni paket', 'Edit Camp': 'Uredi kamp', 'Edit Camp Service': 'Uredi uslugu kampa', 'Edit Camp Status': 'Uredi status kampa', 'Edit Camp Type': 'Uredi tip kampa', 'Edit Campaign': 'Uredi kampanju', 'Edit Campaign 
Message': 'Uredi poruku kampanje', 'Edit Case': 'Uredi slučaj', 'Edit Catalog': 'Uredi katalog', 'Edit Catalog Item': 'Uredi stavku kataloga', 'Edit Certificate': 'Uredi certifikat', 'Edit Certification': 'Uredi certifikaciju', 'Edit Cluster': 'Uredi grupisanje', 'Edit Cluster Subsector': 'Uredi podsektor skupa', 'Edit Commitment': 'Uredi zaduženje', 'Edit Commitment Item': 'Uredi stavku zaduženje', 'Edit Committed People': 'Uredi zadužene ljude', 'Edit Committed Person': 'Uredi zaduženu osobu', 'Edit Community Details': 'Uredi podatke zajednice', 'Edit Competency': 'Uredi kompetentnost', 'Edit Competency Rating': 'Uredi status spremnosti', 'Edit Completed Assessment Form': 'Uredi završen formular ocjene', 'Edit Config': 'Izmijeni konfiguraciju', 'Edit Contact': 'Uredi kontakt', 'Edit Contact Details': 'Uredi detalje kontakta', 'Edit Contact Information': 'Uredi informacije o kontaktu', 'Edit Contents': 'Uredi sadržaj', 'Edit Course': 'Uredi kurs', 'Edit Course Certicate': 'Uredi certifikat za tečaj', 'Edit Course Certificate': 'Uredi certifikat kursa', 'Edit Credential': 'Uredi akreditiv', 'Edit current record': 'Uredi trenutni zapis', 'Edit Dead Body Details': 'Uredi detalje izvještaja o mrtvim tijelima', 'Edit Department': 'Uredi odjeljenje', 'Edit Description': 'Uredi opis', 'Edit Details': 'Uredi detalje', 'Edit Disaster Victims': 'Uredi žrtve katastrofe', 'Edit Distribution': 'Uredi raspodjelu', 'Edit Distribution Item': 'Uredi stavku raspodjele', 'Edit Document': 'Uredi dokument', 'Edit Donation': 'Uredi donaciju', 'Edit Donor': 'Uredi donatora', 'Edit DRRPP Extensions': 'Uredi DRRPP proširenja', 'Edit Education Details': 'Uredi podatke o obrazovanju', 'Edit Education Level': 'Uredi nivo obrazovanja', 'Edit Email': 'Izmijeni e-mail', 'Edit Email Settings': 'Uredi postavke Email-a', 'Edit Entry': 'Uredi unos', 'Edit Event': 'Uredi događaj', 'Edit Event Type': 'Uredi tip događaja', 'Edit Experience': 'Uredi iskustvo', 'Edit Facility': 'Uredi objekat', 'Edit 
Facility Type': 'Uredi vrstu objekta', 'Edit Feature Class': 'Uredi klasu karakteristika', 'Edit Feature Layer': 'Uredi sloj karakteristika', 'Edit Flood Report': 'Uređivanje izvještaja o poplavi', 'Edit Gateway Settings': 'uredi postavke gatewy-a', 'Edit GPS data': 'Uredi GPS podatke', 'Edit Group': 'Uredi grupu', 'Edit Hazard': 'Uredi rizik', 'Edit Heliport': 'Uredi heliodrom', 'Edit Home': 'Uredi kuću', 'Edit Hospital': 'Uredi bolnicu', 'Edit Hours': 'Uredi sate', 'Edit Human Resource': 'Uredi ljudske resurse', 'Edit Identification Report': 'Uredi izvještaj o identifikacijama', 'Edit Identity': 'Uredi identitet', 'Edit Image Details': 'Uredi detalje slike', 'Edit Impact': 'Uredi utjecaj', 'Edit Impact Type': 'Uredi tip utjecaja', 'Edit Import File': 'Uredi uvezeni fajl', 'Edit Incident': 'Uredi incident', 'Edit Incident Report': 'Uredi izvještaj o incidentu', 'Edit Incident Type': 'Uredi tip incidenta', 'Edit Inventory Item': 'Uredi stavku zalihe ', 'Edit Item': 'Uredi stavku', 'Edit Item Catalog': 'Izmijeni stavku u katalogu', 'Edit Item Catalog Categories': 'Uredi kategorije stavki kataloga', 'Edit Item Category': 'Uredi kategoriju stavke', 'Edit Item in Request': 'Uredi stavku u zahtjevu', 'Edit Item Pack': 'Uredi paket stavki', 'Edit Item Packet': 'Uredi paket stavki', 'Edit Item Sub-Categories': 'Uredi podkategorije stavki', 'Edit Job': 'Uredi posao', 'Edit Job Role': 'Uredi opis posla', 'Edit Job Title': 'Uredi radno mjesto', 'Edit Key': 'Uredi ključ', 'Edit Keyword': 'Uredi ključnu riječ', 'Edit Kit': 'Uredi komplet', 'Edit L4': 'Da li urediti lokacije nivoa 4?', 'Edit L5': 'Da li urediti lokacije nivoa 5?', 'Edit Layer': 'Uredi sloj', 'Edit Level %d Locations?': 'Da li urediti lokacije nivoa %d ?', 'Edit Level 1 Assessment': 'Editovanje procjena Nivoa 1', 'Edit Level 2 Assessment': 'Izmjeni procjenu 2. 
nivoa', 'Edit Location': 'Uredi lokaciju', 'Edit Location Details': 'Uredi detalje lokacije', 'Edit Location Hierarchy': 'Uredi hijerarhiju lokacija', 'Edit Log Entry': 'Uredi unos zapisnika', 'Edit Logged Time': 'Uredi stavku zapisnika', 'Edit Mailing List': 'Uredi listu za slanje poruka', 'Edit Map Profile': 'Uredi konfiguraciju mape', 'Edit Map Services': 'Uredi usluge mape', 'Edit Marker': 'Uredi marker', 'Edit Membership': 'Uredi članstvo', 'Edit Message': 'Uredi poruku', 'Edit message': 'Uredi poruku', 'Edit Messaging Settings': 'Uredite postavke poruka', 'Edit Metadata': 'Uredi metapodatke', 'Edit Milestone': 'Uredi prekretnicu', 'Edit Mission': 'Izmjeni misiju', 'Edit Mobile Commons Settings': 'Uredi mobilne postavke', 'Edit Modem Settings': 'Uredi postavke modema', 'Edit Need': 'Uredi potrebu', 'Edit Need Type': 'Uredi tip potrebe', 'Edit Network': 'Uredi mrežu', 'Edit Note': 'Uredi napomenu', 'Edit Office': 'Uredi kancelariju', 'Edit Office Type': 'Uredi tip kancelarije', 'Edit Options': 'Izmjeni opcije', 'Edit Order': 'Uredi narudžbu', 'Edit Organization': 'Uredi organizaciju', 'Edit Organization Domain': 'Uredi domen organizacije', 'Edit Organization Needs': 'Uredi potrebe organizacije', 'Edit Organization Type': 'Uredi tip organizacije', 'Edit Output': 'Uredi izlaz', 'Edit Page': 'Uredi stranicu', 'Edit Parameters': 'Uredi parametre', 'Edit Parser Connection': 'Uredi parsersku konekciju', 'Edit Participant': 'Uredi učesnika', 'Edit Partner': 'Uredi partnera', 'Edit Partner Organization': 'Uredi partnersku organizaciju', 'Edit Patient': 'Uredi pacijenta', 'Edit Peer': 'Uredi suradnika', 'Edit Peer Details': 'Promjena detalja saradnika', 'Edit Permissions for %(role)s': 'Uredi dopuštenja %(role)s', 'Edit Person': 'Uredi osobu', 'Edit Person Details': 'Uredi detalje osobe', "Edit Person's Details": 'Uredi detalje o osobi', 'Edit Personal Effects Details': 'Uredi detalje ličnih uticaja', 'Edit Photo': 'Uredi fotografiju', 'Edit Pledge': 'Uredi podršku', 
'Edit PoI Type': 'Uredi tačku interesa', 'Edit Point of Interest': 'Uredi tačku interesa', 'Edit Policy or Strategy': 'Uredi politiku ili strategiju', 'Edit Population Statistic': 'Izmjeni statistiku stanovništva', 'Edit Position': 'Uredi poziciju', 'Edit Post': 'Uredi blok ugradivog teksta', 'Edit Problem': 'Uredi problem', 'Edit Professional Experience': 'Uredi profesionalno iskustvo', 'Edit Profile': 'Izmijeni profil', 'Edit Profile Configuration': 'Uredi konfiguraciju profila', 'Edit Program': 'Uredi program', 'Edit Project': 'Uredi projekat', 'Edit Project Organization': 'Uredi organizaciju projekta', 'Edit Projection': 'Uredi projekciju', 'Edit Question Meta-Data': 'Uredi metapodatke pitanja', 'Edit Rapid Assessment': 'Uredi brzu procjenu', 'Edit Received Item': 'Uredi primljeni predmet', 'Edit Received Shipment': 'Uredite primljenu pošiljku', 'Edit Record': 'Uredi zapis', 'Edit Recovery Details': 'Izmijeni detalje pronalaženja', 'Edit Region': 'Uredi područje', 'Edit Registration': 'Uredi registraciju', 'Edit Registration Details': 'uredi detalje registracije', 'Edit Relative': 'Uredi srodnike', 'Edit Relief Item': 'Uredi stavku pomoći', 'Edit Repository Configuration': 'Uredi konfiguraciju repozitorija', 'Edit Request': 'Uredi zahtjev', 'Edit Request Details': 'Zatraži detalje o zahtjevu', 'Edit Request Item': 'Izmjeni stavku zahtjeva', 'Edit Request Template': 'Uredi predložak zahtjeva', 'Edit Requested Skill': 'Uredi tražene vještine', 'Edit Resource': 'Uredi resurs', 'Edit Resource Configuration': 'Uredi konfiguraciju resursa', 'Edit Resource Type': 'Uredi tip resursa', 'Edit Response Summary': 'Dodaj sumarni odgovor', 'Edit River': 'Izmjeni rijeku', 'Edit Role': 'Uredi ulogu', 'Edit roles for': 'Uredi uloge za', 'Edit Room': 'Uredi sobu', 'Edit RSS Settings': 'Uredi RSS Postavke', 'Edit saved search': 'Uredi sačuvanu pretragu', 'Edit Scenario': 'Izmijeni scenarij', 'Edit School District': 'Uredi školski rejon', 'Edit School Report': 'Izmijeni školski 
izvještaj', 'Edit Seaport': 'Uredi luku', 'Edit Sector': 'Uredi sektor', 'Edit Sender Priority': 'Uredi prioritet pošiljaoca', 'Edit Sent Item': 'Uredi poslani predmet', 'Edit Series': 'Uredi seriju', 'Edit Service': 'Uredi uslugu', 'Edit Setting': 'Uredi postavke', 'Edit Settings': 'Izmjeni postavke', 'Edit Shelter': 'Uredi sklonište', 'Edit Shelter Service': 'Uredi uslugu skloništa', 'Edit Shelter Status': 'Uredi status skloništa', 'Edit Shelter Type': 'Uredi tip skloništa', 'Edit Shipment Item': 'Uredi predmet pošiljke', 'Edit Shipment to Send': 'Uredi pošiljku za slanje', 'Edit Site Needs': 'Uredi potrebe mjesta', 'Edit Skill': 'Uredi vještinu', 'Edit Skill Equivalence': 'Uredi ekvivalenciju vještina', 'Edit Skill Provision': 'Uredi pružanje vještine', 'Edit Skill Type': 'Uredi tip vještine', 'Edit SMS': 'Uredi SMS', 'Edit SMS Outbound Gateway': 'Uredi SMS izlaz', 'Edit SMS Settings': 'Uredi SMS postavke', 'Edit SMTP to SMS Settings': 'Uredi SMTP-SMS postavke', 'Edit Solution': 'Uredi rješenja', 'Edit Source': 'Uredi izvor', 'Edit Staff': 'Izmijeni osoblje', 'Edit Staff Assignment': 'Uredi dodjelu osoblja', 'Edit Staff Member Details': 'Uredi detalje člana osoblja', 'Edit Staff Type': 'Izmijeni tip osoblja', 'Edit Status': 'Uredi status', 'Edit Status Report': 'Uredi statusni izvještaj', 'Edit Stock Count': 'Uredi zalihu skladišta', 'Edit Storage Bins': 'Uredi korpe za smještaj', 'Edit Storage Location': 'Uredi lokacije skladišta', 'Edit Subscription': 'Uredi pretplatu', 'Edit Subsector': 'Uredi podsektor', 'Edit Supplier': 'Uredi dobavljača', 'Edit Survey Answer': 'Uredi odgovor ankete', 'Edit Survey Question': 'Uredi pitanja upitnika', 'Edit Survey Series': 'Uredi niz anketa', 'Edit Survey Template': 'Dodaj šablon za anketu', 'Edit Symbology': 'Uredi značenje simbola', 'Edit Sync Settings': 'Izmieni postavke sinhronizacije', 'Edit Synchronization Settings': 'Uredi Postavke sinhronizacije', 'Edit Tag': 'Uredi oznaku', 'Edit Task': 'Uredi zadatak', 'Edit Team': 
'Uredi tim', 'Edit Template Section': 'Uredi odjeljak predloška', 'Edit the OpenStreetMap data for this area': 'Uredi OpenStreetMap podatke za ovo područje', 'Edit Theme': 'Uredi temu', 'Edit Theme Data': 'Uredi podatke teme', 'Edit Themes': 'Uredi teme', 'Edit this Disaster Assessment': 'Uredi ovu procjenu katastrofe', 'Edit this entry': 'Uredi ovaj unos', 'Edit Ticket': 'Uredi karticu', 'Edit Tour': 'Uredi turu', 'Edit Track': 'Uredi praćenje', 'Edit Training': 'Uredi obuku', 'Edit Training Event': 'Uredi događaj obuke', 'Edit Tropo Settings': 'Uredi Tropo postavke', 'Edit Twilio Settings': 'Uredi Twilio postavke', 'Edit Twitter account': 'Uredi twitter nalog', 'Edit Twitter Search Query': 'Uredi Twitter upit za pretragu', 'Edit Unit': 'Uredi jedinicu', 'Edit User': 'Uredi korisnika', 'Edit Vehicle': 'Uredi vozilo', 'Edit Vehicle Assignment': 'Uredi dodjelu vozila', 'Edit Vehicle Details': 'Uredi detalje o vozilu', 'Edit Vehicle Type': 'Uredi tip vozila', 'Edit Volunteer Availability': 'Uredi Dostupnost Volontera', 'Edit Volunteer Cluster': 'Uredi skup volontera', 'Edit Volunteer Cluster Position': 'Uredi poziciju skupa volontera', 'Edit Volunteer Cluster Type': 'Uredi tip skupa volontera', 'Edit Volunteer Details': 'Uredi detalje skupa volontera', 'Edit Volunteer Role': 'Uredi ulogu volontera', 'Edit Warehouse': 'Uredi skladište', 'Edit Warehouse Item': 'Uredi stavku skladišta', 'Edit Warehouse Stock': 'Uredi zalihu skladišta', 'Edit Web API Settings': 'Uredi Web API postavke', 'Editable?': 'Izmjenjivo?', 'editor': 'uređivač', 'Education': 'Obrazovanje', 'Education Details': 'Detalji o obrazovanju', 'Education details added': 'Detalji o obrazovanju dodani', 'Education details deleted': 'Detalji o obrazovanju obrisani', 'Education details updated': 'Detalji o obrazovanju ažurirani', 'Education Level': 'Nivo obrazovanja', 'Education Level added': 'Nivo obrazovanja dodan', 'Education Level deleted': 'Nivo obrazovanja obrisan', 'Education Level updated': 'Nivo 
obrazovanja ažuriran', 'Education Levels': 'Nivoi obrazovanja', 'Education materials received': 'Primljeni obrazovni materijali', 'Education materials, source': 'Edukacijski materijal, izvor', 'Effects Inventory': 'Popis efekata', 'Effort Report': 'Izvještaj o uloženom radu', 'eg. gas, electricity, water': 'npr. gas, struja, voda', 'Eggs': 'Jaja', 'Egypt': 'Egipat', 'Either a shelter or a location must be specified': 'Sklonište ili lokacija moraju biti specificirani', 'Either file upload or document URL required.': 'Ili prijenos datoteka ili URL dokumenta potreban.', 'Either file upload or image URL required.': 'Upload-ujte file ili URL zadane slike', 'Elderly person headed households (>60 yrs)': 'Domaćinstva vođena od strane starijih osoba (>60 yrs)', 'Electrical': 'Električno', 'Electrical, gas, sewerage, water, hazmats': 'Električni, plinski, kanalizacioni, vodeni, zaštita', 'Elevated': 'Uzdignuto', 'Elevators': 'Liftovi', 'Email': 'Elektronska pošta', 'Email (Inbound)': 'Elektronska pošta (dolazna)', 'Email Account deleted': 'Nalog elektronske pošte obrisan', 'Email Accounts': 'Nalozi e-pošte', 'Email Address': 'Email adresa', 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Email adresa na koju treba poslati SMS poruke. 
Pretpostavlja se slanje na brojtelefona@adresa', 'Email created': 'Email kreiran', 'Email deleted': 'Email obrisan', 'Email Details': 'Detalji elektronske pošte', 'Email InBox': 'Ulaz e-pošte', 'Email Settings': 'Postavke e-pošte', 'Email Settings updated': 'Postavke e-pošte ažurirane', 'Email settings updated': 'Postavke email-a ažurirane', 'Embalming': 'Balzamovanje', 'Embassy': 'Ambasada', 'embedded': 'ugrađeno', 'Emergency Capacity Building project': 'Projekat hitne izgradnje kapaciteta', 'Emergency Contacts': 'Hitni kontakti', 'Emergency Department': 'Odjel za hitne slučajeve', 'Emergency Medical Services': 'Hitne medicinske službe', 'Emergency Shelter': 'Hitno sklonište', 'Emergency Support Facility': 'Objekat za podršku u hitnim slučajevima', 'Emergency Support Service': 'Služba za hitnu podršku', 'Emergency Telecommunications': 'Telekomunikacije u hitnim slučajevima', 'EMS Reason': 'Razlog za slanje hitne pomoći', 'EMS Status': 'Status hitne medicinske službe', 'EMS Status Reasons': 'Razlozi EMS statusa', 'EMS Traffic Status': 'Status EMS saobraćaja', 'Enable': 'Omogući', 'Enable in Default Config?': 'Uključiti u podrazumijevanoj konfiguraciji?', 'Enable/Disable Layers': 'Omogućite/Onemogućite slojeve', 'Enabled': 'Omogućen', 'Enabled?': 'Omogućeno?', 'Enabling MapMaker layers disables the StreetView functionality': 'Omogućavanje slojeva za izrađivanje karata onemogućuje funkcionalnosti StreetView-a', 'enclosed area': 'ograđeni prostor', 'End date': 'Krajnji datum', 'End Date': 'Završni datum', 'End date should be after start date': 'Krajnji datum mora biti nakon početnog', 'End of Period': 'Kraj Perioda', 'English': 'engleski', 'Enter a date before': 'Unesi datum prije', 'Enter a GPS Coord': 'Unesi GPS koordinate', 'Enter a location': 'Unesi lokaciju', 'Enter a name for the spreadsheet you are uploading (mandatory).': 'Unesite ime za proračunsku tablicu (spreadsheet) koju uploadate (obavezno).', 'Enter a name for the spreadsheet you are uploading.': 
'Unesite ime za tabelarni prikaz koji učitavate', 'Enter a new support request.': 'Unesi novi zahtjev za podršku', 'Enter a number between %(min)g and %(max)g': 'Enter a number between %(min)g and %(max)g', 'enter a number between %(min)g and %(max)g': 'unesite broj između %(min)godina i %(max)godina', 'Enter a summary of the request here.': 'Unesi rezime zahtjeva ovdje', 'Enter a unique label!': 'Unesite jedinstvenu oznaku!', 'Enter a valid date before': 'Unesi validan datum prije', 'Enter a valid email': 'Unesite validan email', 'Enter a valid future date': 'Unesite validan datum u budućnosti', 'Enter a valid past date': 'Unesite valjan rok trajanja', 'Enter a valid phone number': 'Unesite važeći broj telefona', 'enter a value': 'unesite vrijednost', 'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'Unesite vrijednost pažljivo bez grešaka u kucanju, jer se ovo polje mora usaglastiti s postojećim podacima.', 'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g', 'enter an integer between %(min)g and %(max)g': 'Unesi cijeli broj između %(min)g i %(max)g', 'Enter an integer greater than or equal to %(min)g': 'Enter an integer greater than or equal to %(min)g', 'Enter Completed Assessment': 'Unesi završenu ocjenu', 'Enter Completed Assessment Form': 'Unesi završen formular ocjene', 'Enter Coordinates in Deg Min Sec': 'Unesi kordinate u stepenima, minutama i sekundama', 'Enter Coordinates:': 'Unesi kordinate:', 'enter date and time': 'unesite datum i vrijeme', 'enter date and time in range %(min)s %(max)s': 'unesite datum i vrijeme u opsegu %(min)s %(max)s', 'enter date and time on or after %(min)s': 'unesi datum i vrijeme za %(min)s', 'enter date and time on or before %(max)s': 'unesi datum i vrijeme prije %(max)s', 'Enter phone number in international format like +46783754957': 'Unesite telefonski broj u internacionalnom formatu poput +46783754957', 'Enter some characters to bring 
up a list of possible matches': 'Unesite neke znakove kako biste pozvali listu mogucih poklapanja', 'Enter some characters to bring up a list of possible matches.': 'Upišite nekoliko početnih karaktera da biste vidjeli listu mogućih podudarnosti.', 'Enter tags separated by commas.': 'Unesite oznake odvojene zarezima.', 'Enter the data for an assessment': 'Unijeti podatke za procjenu', 'Enter the same password as above': 'Unesi istu lozinku kao iznad', 'Enter your first name': 'Unesite vaše ime ', 'Enter your firstname': 'Unesite svoje ime', 'Enter your organisation': 'Unesi svoju organizaciju', 'Enter your organization': 'Unesite vašu organizaciju', 'Entered': 'Uneseno', 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Unos telefonskog broja je proizvoljan, ali ukoliko se odlučite da unesete možete se registrovati da primate SMS poruke.', 'Entity': 'jedinka', 'Entity Information': 'Informacije o jedinki', 'Entity Type': 'Tip entiteta', 'Entry added to Asset Log': 'Stavka dodana u zapisnik sredstava', 'Entry deleted': 'Unos izbrisan', 'Environment': 'Okruženje', 'Equatorial Guinea': 'Ekvatorijalna Gvineja', 'Equipment': 'Oprema', 'ER Status': 'Status hitne pomoći', 'ER Status Reason': 'Razlog ER statusa', 'Eritrea': 'Eritreja', 'Error encountered while applying the theme.': 'Desila se greška pri primjenjivanju teme.', 'Error in message': 'Greška u poruci', "Error logs for '%(app)s'": 'Zapisnici grešaka za "%(app)s"', 'Error reading file (invalid format?): %(msg)s': 'Greška čitanja datoteke (pogrešan format?): %(msg)s', 'Error sending message': 'Greška pri slanju poruke', 'Error sending message!': 'Greška pri slanju poruke!', 'Error Tickets': 'Kartice Grešaka', 'Errors': 'Greške', 'ESRI Shape File': 'ESRI datoteka likova', 'Essential Staff?': 'Suštinski bitno osoblje?', 'Est. 
Delivery Date': 'Procijenjeni datum isporuke', 'Estimated # of households who are affected by the emergency': 'Procijenjen broj domaćinstava koja su pogođena od nesreće', 'Estimated # of people who are affected by the emergency': 'Procjenjen broj ljudi koji su pogođeni krizom', 'Estimated Delivery Date': 'Procijenjeni Datum Isporuke', 'Estimated Overall Building Damage': 'Ukupna estimirana građevinska šteta', 'Estimated Reopening Date': 'Procijenjeni datum ponovnog otvaranja', 'Estimated total number of people in institutions': 'Procjenjen ukupan broj ljudi u institucijama', 'Estimated Value': 'Procjenjena vrijednost', 'Estimated Value per Pack': 'Procijenjena vrijednost po paketu', 'Estonia': 'Estonija', 'Ethiopia': 'Etiopija', 'Ethnicity': 'Nacionalnost', 'Euros': 'Eura', 'Evacuating': 'Evakuacija', 'Evacuation drills': 'Vježbe evakuacije', 'Evacuation is short-term whilst storm passing e.g. 12 hours, hence people need less space.': 'Evakuacija je kratkoročna do prolaska oluje, npr. 12 sati, stoga ljudima treba manje prostora.', 'Evacuation Route': 'Put evakuacije', 'Evaluate the information in this message. 
(This value SHOULD NOT be used in public warning applications.)': 'Procjeni informaciju u ovoj poruci.(Ova vrijednost NE BI TREBALA BITI korištena u javnim aplikacijama za upozorenje', 'Event': 'Dogadaj', 'Event added': 'Događaj dodan', 'Event deleted': 'Događaj obrisan', 'Event Details': 'Detalji događaja', 'Event Resource': 'Resurs događaja', 'Event Time': 'Vrijeme događaja', 'Event Type': 'Tip događaja', 'Event type': 'Tip događaja', 'Event Type added': 'Tip događaja dodan', 'Event Type Details': 'Detalji o vrsti događaja', 'Event Type removed': 'Tip događaja obrisan', 'Event Type updated': 'Tip događaja ažuriran', 'Event Types': 'Tipovi događaja', 'Event updated': 'Događaj ažuriran', 'Events': 'Događaji', 'Example': 'Primjer', 'Exceeded': 'Prekoračeno', 'Excellent': 'Odlično', 'Exclude contents': 'Isključi sadržaj', 'Excreta disposal': 'Sanitarni čvor', 'Execute a pre-planned activity identified in <instruction>': 'Izvrši unaprijed planiranu aktvinost identificiranu u <instrukciji>', 'Exercise': 'Vježba', 'EXERCISE': 'Vježba', 'Exercise?': 'Vježba?', 'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Vježbe znače da svi ekrani imaju vodeni žig i sve obavijesti imaju isti prefiks.', 'Existing food stocks': 'Postojeće zalihe hrane', 'Existing food stocks, main dishes': 'Postojeće zalihe hrane, glavni artikli', 'Existing food stocks, side dishes': 'Postojeće zalihe hrane, pomoćni artikli', 'Existing location cannot be converted into a group.': 'Postojeća lokacija ne može biti pretvorena u grupu', 'Existing Placard Type': 'Postojeći tip plakata', 'Existing Sections': 'Postojeća odjeljenja', 'Exits': 'Izlazi', 'Expected In': 'Očekivano u', 'Expected Out': 'Očekivano', 'Expected Return Home': 'Očekivani povratak u dom', 'Experience': 'Iskustvo', 'Expiration Date': 'Datum isteka', 'Expiration Details': 'Detalji isteka', 'Expiration Report': 'Izvještaj o isteku', 'Expired': 'Istekao', 'Expired?': 'Istekao?', 'Expiring Staff Contracts 
Report': 'Izvještaj o osoblju kome ističe ugovor', 'Expiry (months)': 'Ističe (mjeseci)', 'Expiry date': 'Datum isteka', 'Expiry Date': 'Rok valjanosti', 'Expiry Date/Time': 'Vrijeme i datum isteka', 'Expiry Time': 'Vrijeme isteka', 'Explosive Hazard': 'Opasnost od eksplozije', 'Export': 'Izvezi', 'Export all Completed Assessment Data': 'Izvezi sve podatke o završenoj procjeni', 'Export as': 'Izvezi kao', 'export as csv file': 'izvezi kao CSV dokument', 'Export Data': 'Izvezi podatke', 'Export Database as CSV': 'Izvezi bazu podataka kao CSV', 'Export in %(format)s format': 'Izvoz u %(format)s formatu', 'Export in GPX format': 'Izvoz u GPX formatu', 'Export in KML format': 'Poslati u KML formatu', 'Export in OSM format': 'Izvoz u OSM formatu', 'Export in PDF format': 'Izvesti u PDF formatu', 'Export in RSS format': 'Eksportujte u RSS formatu', 'Export in XLS format': 'Izvesti u XLS formatu', 'Exterior and Interior': 'Vanjski i unutrašnji', 'Exterior Only': 'Samo vanjski dio', 'External Features': 'Spoljnje mogućnosti', 'Eye Color': 'Boja očiju', 'Eyebrows, Peculiarities': 'Obrve, specifičnosti', 'Eyebrows, Shape': 'Obrve, oblik', 'Eyebrows, Thickness': 'Obrve, debljina ', 'Eyes, Colour': 'Oči, boja', 'Eyes, Distance between Eyes': 'Oči, razmak između očiju', 'Eyes, Peculiarities': 'Oči, specifičnosti', 'Eyes, Shade': 'Oči, sjena', 'Face': 'Lice', 'Facebook': 'Facebook', 'Facial hair, color': 'Dlake po licu, boja', 'Facial hair, Colour': 'Dlake po licu, boja', 'Facial hair, comment': 'Dlake po licu, komentar', 'Facial hair, length': 'Dlake po licu, dužina', 'Facial hair, type': 'Dlake na licu, tip', 'Facial hair, Type': 'Dlake na licu, tip', 'Facial hear, length': 'Brada, dužina', "Facilitate uploading of missing person's photograph": 'Omogućite slanje fotografije nedostajuće osobe', 'Facilities': 'Objekti', 'Facility': 'Objekat', 'Facility added': 'Dodat objekat', 'Facility Contact': 'Kontakt vezan za objekat', 'Facility deleted': 'Obrisan objekat', 'Facility 
Details': 'Detalji objekta', 'Facility Operations': 'Aktivnosti objekta', 'Facility or Location': 'Objekat / Lokacija', 'Facility removed': 'Objekat uklonjen', 'Facility Status': 'Stanje objekta', 'Facility Type': 'Vrsta objekta', 'Facility Type added': 'Vrsta objekta dodana', 'Facility Type deleted': 'Vrsta objekta obrisana', 'Facility Type Details': 'Detalji o vrsti objekta', 'Facility Type updated': 'Vrsta objekta ažurirana', 'Facility Types': 'Vrste objekata', 'Facility updated': 'Ažuriran objekat', 'Factors affecting school attendance': 'Faktori koji utiču na pohađanje škole', 'Fail': 'Neuspjeh', 'Failed': 'Nije uspjelo', 'Failed!': 'Nije uspjelo!', 'Fair': 'Pošteno', 'Falling Object Hazard': 'Opasnost od padajućih objekata', 'Families/HH': 'Porodice/HH', 'Family': 'Porodica', 'Family Care': 'Porodična briga', 'Family tarpaulins received': 'Cerade za porodicu primljene', 'Family tarpaulins, source': 'Porodične cerade, izvor', 'Family/friends': 'Porodica/prijatelji', 'Farmland/fishing material assistance, Rank': 'Materijalna pomoć za obradu zemlje/ribolov , rang', 'fat': 'mast', 'Fatalities': 'Ljudske žrtve', 'FAX': 'FAKS', 'Fax': 'Faks', 'Feature Class': 'Klasa karakteristika', 'Feature Class added': 'Klasa karakteristika dodana', 'Feature Class deleted': 'Obrisana klasa karakteristika', 'Feature Class Details': 'Detalji klase karakteristika', 'Feature Class updated': 'Klasa karakteristika ažurirana', 'Feature Classes': 'Klase karakteristika', 'Feature Classes are collections of Locations (Features) of the same type': 'Klase karakteristika su kolekcije lokacija (karakteristika) istog tipa', 'Feature Group': 'Grupa karakteristika', 'Feature Group added': 'Dodan grupa karakteristika', 'Feature Group deleted': 'Grupna karakteristika izbrisana', 'Feature Group Details': 'Detalji grupe karakteristika', 'Feature Group Updated': 'Grupa karakteristika ažurirana', 'Feature Group updated': 'Grupa karakteristika ažurirana', 'Feature Groups': 'Grupe karakteristika', 
'Feature Info': 'Informacije o karakteristici', 'Feature Layer': 'Sloj karakteristika', 'Feature Layer added': 'Dodat sloj karakteristika', 'Feature Layer deleted': 'Obrisan sloj karakteristika', 'Feature Layer Details': 'Detalji sloja karakteristika', 'Feature Layer updated': 'Ažuriran sloj karakteristika', 'Feature Layers': 'Slojevi karakteristika', 'Feature Namespace': 'Imenik karakteristika', 'Feature Request': 'Zahtjev za karakteristikama', 'Feature Type': 'Tip karakteristike', 'Features Include': 'Karakteristike uključuju', 'feedback': 'povratna informacija', 'Feedback': 'Povratna informacija', 'Feet, Condition': 'Stopalo, stanje', 'Feet, Nails': 'Stopala, nokti', 'Feet, Shape': 'Stopalo, oblik', 'Female': 'Žensko', 'female': 'žensko', 'Female headed households': 'Domaćinstva u kojim je žena glava porodice', 'Few': 'Mali broj', 'Field': 'Terenski', 'Field Hospital': 'Poljska bolnica', 'Fields tagged with a star': 'Polja označena zvjezdicom', 'Fiji': 'Fidži', 'File': 'Datoteka', 'File Imported': 'Datoteka je unesena', 'File Importer': 'Uvoz datoteka', 'File name': 'Ime datoteke', 'File not found': 'Datoteka nije pronađena', 'File uploaded': 'Datoteka poslana', 'Files': 'Datoteke', 'Fill in Latitude': 'Dopuni geografsku širinu', 'Fill in Longitude': 'Upišite geografsku dužinu', 'fill in order: day(2) month(2) year(4)': 'popuni redoslijedom: dan(2) mjesec(2) godina(4)', 'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'popuni redoslijedom: sat(2) min(2) dan(2) mjesec(2) godina(4)', 'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'popuni redoslijedom: sat(2) min(2) mjesec(2) dan(2) godina(4)', 'fill in order: month(2) day(2) year(4)': 'popuni redoslijedom: mjesec(2) dan(2) godina(4)', 'Filter': 'Filtriraj', 'Filter by %(type)s': 'Filtriraj po %(type)s', 'Filter by Bookmark': 'Filtriraj po zabilješci', 'Filter by Category': 'Filtriraj po kategoriji', 'Filter by Country': 'Filtriraj po državi', 'Filter by Date': 'Filtriraj po datumu', 'Filter by 
Disaster': 'Filtriraj po katastrofi', 'Filter by Location': 'Filtriraj po lokaciji', 'Filter by Organization': 'Filtriraj po organizaciji', 'Filter by Status': 'Filtriraj po statusu', 'Filter by Tag': 'Filtriraj po oznaci', 'Filter by Type': 'Filtriraj po tipu', 'Filter Field': 'Polje filtera', 'Filter Options': 'Opcije filtera', 'Filter Tweets by the date they were tweeted on': 'Filtriraj Tweet po danu kada su navedeni', 'Filter Tweets by who tweeted them': 'Filtriraj Tweet po osobama koje su unijele', 'Filter type': 'Tip Filtera', 'Filter Value': 'Filter vrijednosti', 'Filtered search of aid pledges and requests': 'Filtrirana pretraga ponude i potražnje pomoći', 'Filters': 'Filteri', 'final report': 'završni izvještaj', 'Find': 'Pronađi', 'Find a Person Record': 'Nađite zapis o osobi', 'Find by Name': 'Nađi po imenu', 'Find Dead Body Report': 'Pronađi izvještaj o mrtvim osobama', 'Find Details': 'Nađi detalje', 'Find Hospital': 'Pronađi bolnicu', 'Find more': 'Nađi više', 'Find on Map': 'Nađi na karti', 'Find Person Record': 'Pronađi zapis osobe', 'Find Recovery Report': 'Nađi Izvještaj o pronalaženju', 'Find Report added': 'Dodat izvjestaj o traženju', 'Find Report deleted': 'Izvještaj o traženju izbrisan', 'Find Report updated': 'Traženi izvještaj ažuriran', 'Find Volunteers': 'Pronađi volontere', 'Finder': 'Pronalazač', 'Fingerprint': 'Otisak prsta', 'Fingerprinting': 'Uzimanje otiska prsta', 'Fingerprints': 'Otisci', 'Finish': 'Završetak', 'Finished Jobs': 'Gotovi zadaci', 'Finland': 'Finska', 'Fire': 'Vatra', 'Fire Fighter Forest Vehicle': 'Vatrogasno šumsko vozilo', 'Fire Fighter Light Vehicle': 'Vatrogasno lako vozilo', 'Fire Fighter Rural Vehicle': 'Vatrogasno seosko vozilo', 'Fire Fighter Special Vehicle': 'Vatrogasno specijalno vozilo', 'Fire Fighter Urban Vehicle': 'Vatrogasno gradsko vozilo', 'Fire Station': 'Vatrogasna stanica', 'Fire suppression and rescue': 'Suzbijanje vatre i spašavanje', 'First': 'Prvi', 'First name': 'Ime', 'First Name': 'Ime', 
'Fishing': 'Ribolov', 'Flash Flood': 'Nagla poplava', 'Flash Freeze': 'Brzo zamrzavanje', 'flatfooted': 'dustabanlija', 'Flexible Impact Assessments': 'Fleksibilna procjena uticaja', 'Flood': 'Poplava', 'Flood Alerts': 'Uzbune od poplava', 'Flood Alerts show water levels in various parts of the country': 'Alarmi poplava pokazuju vodostaje u različitim dijelovima države', 'Flood Depth': 'Dubina poplave', 'Flood Report': 'Izvještaj o poplavi', 'Flood Report added': 'Izvještaj o poplavi dodan', 'Flood Report deleted': 'Izvještaj o poplavi izbrisan', 'Flood Report Details': 'Detalji izvještaja o poplavi', 'Flood Report updated': 'Izvještaj o Poplavi ažuriran', 'Flood Reports': 'Izvještaji o poplavama', 'Flooding': 'Poplava', 'Flow Status': 'Status toka', 'flush latrine with septic tank': 'Očisti zahod i septičku jamu', 'Focal Person': 'Poznata osoba', 'Focal Point': 'Tačka fokusa', 'Fog': 'Magla', 'Folder': 'Mapa', 'Food': 'Hrana', 'Food assistance': 'Pomoć u hrani', 'Food assistance available/expected': 'Pomoć u hrani primljena/očekivana', 'Food security ': 'Sigurnost hrane', 'Food Supply': 'Zalihe hrane', 'food_sources': 'izvori hrane', 'Footer': 'Zaglavlje na dnu strane', 'Footer file %s missing!': 'Nedostaje datoteka zaglavlja %s!', 'For': 'Za', 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Za zemlju to će biti ISO2 kod, za grad, to bi bio Locode aerodroma', 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Za svakog sinhronizovanog partnera , postoji zadani sinhronizovani posao nakon određenog vremenskog intervala . Takođe možete postaviti više sinhronizovanih poslova koji mogu biti prilagođeni prema vašim potrebama . Kliknite link nadesno da počnete.', 'For Eden instances enter the application base URL, e.g. 
http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Za instalacije Eden platforme unesite URL bazne organizacije, npr. http://sync.sahanfoundation.org/eden, za druge učesnike URL sinhronizacijskog interfejsa.', 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Za povećanu sigurnost, preporučljivo je upisati korisničko ime i šifru, te obavijestiti administratora ostalih mašina u Vašoj organizaciji da doda to korisničko ime i šifru preko Vašeg UUID u Sinhronizacija -> Sinhronizacijski partneri', 'For Entity': 'Za jedinku', 'For live help from the Sahana community on using this application, go to': 'Ako trebate pomoć pri korištenju ove aplikacije od strane Sahana zajednice, idite na', 'For messages that support alert network internal functions': 'Za poruke koje podržavaju interne funkcije mreža za uzbunjivanje', 'For more details on the Sahana Eden system, see the': 'Za vise detalja o Sahana Eden sistemu, pogledati', 'For more details on the Sahana system, see the': 'Za vise detalja o Sahana sistemu, pogledati', 'For more information, see': 'Za više informacija, pogledaj', 'For more information, see ': 'Za više informacija, pogledajte ', 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'Za POP-3 ovo je obično 110 (995 za SSL), za IMAP ovo je obično 143 (993 za IMAP).', 'For:': 'Za:', 'forehead': 'čelo', 'Forehead, Height': 'Čelo, visina', 'Forehead, Inclination': 'Čelo, nagib', 'Forehead, Width': 'Čelo, širina', 'Forest Fire': 'Šumski požar', 'Forest Tank Tactical Vehicle': 'Šumska taktička pokretna cisterna', 'form data': 'podaci formulara', 'Form Settings': 'Postavke obrasca', 'Formal camp': 'Formalni kamp', "Format the list of attribute values & the RGB value to use for these as a JSON object, 
e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Oblikujte popis atributa i RGB vrijednosti da bi se koristile kao JSON objekt, npr.: {Crvena: '#FF0000', Zelena: '#00FF00 ', Žuta: '#FFFF00 '}", 'Forms': 'Formulari', 'Found': 'Pronađeno', 'found': 'nađeno', 'Foundations': 'Osnove', 'Freezing Drizzle': 'ledeno rominjanje', 'Freezing Rain': 'Ledena kiša', 'Freezing Spray': 'ledena kiša', 'Freight company or organisation providing transport': 'Transportno preduzeće ili organizacija koja pruža transport', 'French': 'Francuski', 'Frequency': 'Učestanost', 'Friday': 'Petak', 'From': 'Od', 'From %(site)s': 'Sa %(site)s', 'From Facility': 'Iz objekta', 'From Inventory': 'Iz inventara', 'From Location': 'Sa lokacije', 'From Organization': 'Od organizacije', 'From Person': 'Od osobe', 'from Twitter': 'sa Twittera', 'Frost': 'Mraz', 'Fuel': 'Gorivo', 'Fulfil. Status': 'Ispuni status', 'Fulfill Status': 'Ispuni status', 'Fulfillment Status': 'Status realizacije', 'full': 'puno', 'Full': 'Potpun', 'Full beard': 'Puna brada', 'Fullscreen Map': 'Mapa punog ekrana', 'Function': 'Funkcija', 'Function name': 'Ime funkcije', 'Function Permissions': 'Funkcijske dozvole', 'Function tour is activated': 'Tura funkcije je aktivirana', 'Functions available': 'Dostupne funkcije', 'Funding': 'Fondovi', 'Funding Organisation': 'Osnivačka organizacija', 'Funding Organization': 'Osnivačka organizacija', 'Funding Report': 'Izvještaj o fondovima', 'Funds Contributed': 'Doprinos fondovima', 'Funeral': 'Sahrana', 'Further Action Recommended': 'Preporučljive su daljnje akcije', 'Gale Wind': 'Jak vjetar', 'Gap Analysis': 'Analiza propusta', 'Gap Analysis Map': 'Karta analize propusta', 'Gap Analysis Report': 'Izvještaj o analizi pukotina', 'Gap Map': 'Karta sa pukotinama', 'Gap Report': 'Izvještaj propusta', 'Gas Supply Left (in hours)': 'Preostala zaliha goriva (u satima)', 'Gas Supply Type': 'Vrsta zaliha goriva', 'Gateway': 'Mrežni izlaz', 'Gateway Settings': 'Postavke mrežnog 
izlaza', 'Gateway settings updated': 'Postavke mrežnog izlaza ažurirane', 'Gender': 'Spol', 'General': 'Općenito', 'General Comment': 'Generalni komentar', 'General emergency and public safety': 'Opće opasnosti i javna sigurnost', 'General information on demographics': 'Opšte demografske informacije', 'General Medical/Surgical': 'Opće zdravstveno / hirurško', 'General Person Transportation Vehicle': 'Transportno vozilo opšte namjene', 'General Skills': 'Opšte vještine', 'Generate portable application': 'Generiši prenosivu aplikaciju', 'Geocode': 'Geokod', 'Geocoder Selection': 'Izbor geokodera', 'GeoJSON Layer': 'GeoJSON sloj', 'Geometry Name': 'Geometrijski naziv', 'Geonames.org search requires Internet connectivity!': 'Geonames.org pretraga zahtijeva Internet vezu!', 'Geophysical (inc. landslide)': 'Geofizički (ink. klizište)', 'Georgia': 'Gruzija', 'GeoRSS Layer': 'GeoRSS sloj', 'Geotechnical': 'Geotehnički', 'Geotechnical Hazards': 'Geotehničke opasnosti', 'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Modul Geraldo nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju PDF izlaza!', 'Geraldo module not available within the running Python - this needs installing to do PDF Reporting!': 'Modul Geraldo nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju PDF izlaza!', 'German': 'njemački', 'Germany': 'Njemačka', 'Get Feature Info': 'Dobavi informacije o karakteristici', 'Get incoming recovery requests as RSS feed': 'Dobijte dolazeće zahtjeve za oporavak kao RSS feed', 'getting': 'uzimajući', 'Ghana': 'Gana', 'Girls 13-18 yrs in affected area': 'Djevojčice 13-18 god u pogođenom području', 'Girls 13-18 yrs not attending school': 'Djevojčice 13-18 godina koji ne pohađaju školu', 'Girls 6-12 yrs in affected area': 'Djevojčice 6-12 godina u zahvaćenim područjima', 'Girls 6-12 yrs not attending school': 'Djeviojčice 6-12 godina koje ne pohađaju školu', 'GIS integration to view location 
details of the Shelter': 'GIS integracija za pregled detalja lokacije skloništa', 'GIS Reports of Shelter': 'GIS Izvještaji skloništa', 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Dajte kratak opis fotografije, npr. šta se gdje može vidjeti na slici (nije obavezno).', 'Give information about where and when you have seen the person': 'Dajte informaciju o tome gdje i kada ste vidjeli ovu osobu', 'Give information about where and when you have seen them': 'Dajte informaciju o tome gdje i kada ste ih vidjeli', 'Global Messaging Settings': 'Globalna Podešavanje Poruka', 'Go': 'Idi', "Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": "Idite na %(url)s, prijavite se i registrujte vašu aplikaciju. Možete unijeti neki URL i tada samo trebati izmijeniti dozvole za 'mijenjanje mape'.", 'Go to Request': 'Idi na zahtjev', 'Goatee': 'Kozja bradica', 'Good': 'Dobro', 'Good Condition': 'Dobro stanje', 'Goods Received Note': 'Napomena o prijemu robe', 'Google Layer': 'Google sloj', "Google Layers cannot be displayed if there isn't a valid API Key": 'Google Layers ne mogu biti prikazani ukoliko ne postoji validan API ključ', 'Government': 'Vlada', 'Government building': 'Zgrada Vlade', 'Government UID': 'JMB', 'Government UUID': 'JMB', 'GPS Data': 'GPS podaci', 'GPS data': 'GPS podaci', 'GPS data added': 'GPS podaci dodani', 'GPS data deleted': 'GPS podaci obrisani', 'GPS data updated': 'GPS podaci ažurirani', 'GPS Track': 'GPS praćenje', 'GPS Track File': 'Datoteka GPS praćenja', 'GPX Layer': 'GPX SLOJ', 'GPX Track': 'GPX staza', 'Grade': 'Ocjena', 'Graph': 'Grafikon', 'Graph Model': 'Model grafa', 'Great British Pounds': 'Britanske funte', 'Greater than 10 matches. Please refine search further': 'Više od 10 poklapanja. 
Molim napravite precizniju pretragu', 'Greece': 'Grčka', 'Greek': 'Grčki', 'green': 'zelena', 'Green': 'Zeleno', 'grey': 'siva', 'Grid': 'Mreža', 'Ground movement, fissures': 'Kretanje tla, pukotine', 'Ground movement, settlement, slips': 'Pokreti zemljišta, naselja, klizišta', 'Group': 'Grupa', 'Group added': 'Grupa dodana', 'Group deleted': 'Grupa obrisana', 'Group description': 'Opis grupe', 'Group Description': 'Opis grupe', 'Group Details': 'Detalji o grupi', 'Group Head': 'Glavna osoba grupe', 'Group ID': 'IB grupe', 'Group Leader': 'Vođa grupe', 'Group Member added': 'Dodan član grupe', 'Group Members': 'Članovi grupe', 'Group Membership added': 'Dodano članstvo grupe', 'Group Membership deleted': 'Grupno članstvo izbrisano', 'Group Membership Details': 'Detalji grupnog članstva', 'Group Membership updated': 'Grupno članstvo ažurirano', 'Group Memberships': 'Grupna članstva', 'Group name': 'Ime grupe', 'Group Name': 'Ime grupe', 'Group Title': 'Naslov Grupe', 'Group Type': 'Vrsta grupe', 'Group updated': 'Grupa ažurirana', 'Group Updated': 'Ažurirana grupa', 'Grouped by': 'Grupisano po', "Grouping by 'Family Unit' or other group category": 'Grupisanje po porodičnoj jedinici ili drugoj kategoriji grupe', 'Groups': 'Grupe', 'Groups removed': 'Grupa odstranjena', 'Guatemala': 'Gvatemala', 'Guest': 'Gost', 'Guided Tour Functionality': 'Funkcionalnost vođene ture', 'Guided Tours': 'Vođene ture', 'Guinea': 'Gvineja', 'Guinea-Bissau': 'Gvineja-Bisau', 'Guyana': 'Gvajana', 'Hail': 'Gräd', 'Hair Color': 'Boja kose', 'Hair Comments': 'Komentari na kosu', 'Hair Length': 'Dužina kose', 'Hair of the head, Baldness (extent)': 'Kosa na glavi, ćelavost (veličina)', 'Hair of the head, Baldness (location)': 'Kosa na glavi, ćelavost (mjesto)', 'Hair of the head, Colour': 'Kosa, boja', 'Hair of the head, Length': 'Kosa, dužina', 'Hair of the head, Parting': 'Kosa, razdjeljak', 'Hair of the head, Shade of colour': 'Kosa na glavi, nijansa boje', 'Hair of the head, Style': 'Kosa, 
stil', 'Hair of the head, Thickness': 'Kosa, debljina', 'Hair of the head, Type': 'Kosa, tip', 'Hair Style': 'Frizura', 'Hair-piece': 'Dlaka', 'Hands, Nail length': 'Ruke, dužina noktiju', 'Hands, Nail peculiarities': 'Ruke, specifičnosti noktiju', 'Hands, Nicotine': 'Ruke, nikotin', 'Hands, Shape': 'Ruke, oblik', 'Hands, Size': 'Ruke, veličina', 'Has data from this Reference Document been entered into Sahana?': 'Da li su podaci iz ovog referentnog dokumenta uneseni u Sahanu?', 'Has only read-only access to records relating to this Organization or Site.': 'Da li ima pristup samo za čitanje vezan za ovu organizaciju ili mjesto?', 'Has the %(GRN)s (%(GRN_name)s) form been completed?': 'Da li je formular %(GRN)s (%(GRN_name)s) ispunjen?', 'Has the Certificate for receipt of the shipment been given to the sender?': 'Da li je pošiljalac primio certifikat o prijemu isporuke?', 'Has the GRN (Goods Received Note) been completed?': 'Da li su BPR (Bilješke o Primljenoj Robi) popunjene?', 'Has your business been damaged in the course of the disaster?': 'Da li je vaše posao oštećen usljed katastrofe?', 'Have normal food sources been disrupted?': 'Da li su normalni izvori hrane oštećeni?', 'Hazard': 'Rizik', 'Hazard added': 'Rizik dodan', 'Hazard added to Project': 'Rizik dodan u projekat', 'Hazard deleted': 'Rizik obrisan', 'Hazard Details': 'Detalji rizika', 'Hazard Pay': 'Rizično plaćanje', 'Hazard removed from Project': 'Rizik uklonjen sa projekta', 'Hazard updated': 'Rizik ažuriran', 'Hazardous Material': 'Opasan materijal', 'Hazardous Road Conditions': 'Opasni uslovi na putu', 'Hazards': 'Rizici', 'Head': 'Glava', 'Head form, front': 'Oblik glave, prednji', 'Head form, profile': 'Oblik glave, profil', 'Header Background': 'Pozadina zaglavlja', 'Header background file %s missing!': 'Pozadinska datotka zaglavlja %s nedostaje!', 'Headquarters': 'Glavno sjedište', 'Health': 'Zdravlje', 'Health care assistance, Rank': 'Pomoć zdravstvene zaštite, stepen', 'Health center': 
'Zdravstveni centar', 'Health center with beds': 'Zdravstveni centar sa krevetima', 'Health center without beds': 'Zdravstveni centar bez kreveta', 'Health Org UUID': 'Identifikacijski broj zrdavstvene organizacije', 'Health services functioning prior to disaster': 'Zdravstvene usluge koje su funkcionisale prije katastrofe', 'Health services functioning since disaster': 'Zdravstvene usluge koje djeluju nakon katastrofe-', 'Health services status': 'Status zdravstvenih usluga', 'Healthcare Worker': 'Zdravstveni radnik', 'Heat and Humidity': 'Toplota i Vlažnost', 'Heat Wave': 'Toplotni talas', 'heavy': 'težak', 'Height': 'Visina', 'Height (cm)': 'Visina (cm)', 'Height (m)': 'visina (m)', 'Helipad Information': 'Informacije o helikopterskom sletištu', 'Heliport': 'Heliodrom', 'Heliport added': 'Heliodrom dodan', 'Heliport deleted': 'Heliodrom obrisan', 'Heliport Details': 'Detalji heliodroma', 'Heliport updated': 'Heliodrom ažuriran', 'Heliports': 'Heliodromi', 'Help': 'Pomoć', 'Helps to monitor status of hospitals': 'Pomaže pri praćenju statusa bolnica', 'Helps to report and search for Missing Persons': 'Pomaže pri izvještavanju i traženju nestalih osoba', 'Helps to report and search for missing persons': 'Pomaže pri prijavljivanju i traženju nestalih osoba', 'here': 'ovdje', 'Here are the solution items related to the problem.': 'Ovdje su predmeti rješenja povezani sa problemom.', 'Heritage Listed': 'Izlistano nasljeđe', 'HFA Priorities': 'HFA Prioriteti', 'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'HFA1: Osigurajte da je smanjenje rizika od katastrofe državni i lokalni prioritet i jaka institucionalna baza za implementaciju.', 'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'HFA2: Prepoznati, procijeniti i pratiti rizike od katastrofe i pojačati rano upozoravanje', 'HFA3: Use knowledge, innovation and education to build a culture of safety and 
resilience at all levels.': 'HFA3: Koristiti znanjem inovacije i obrazovanje da se sagradi bezbjednosna kultura na svim nivoima.', 'HFA4: Reduce the underlying risk factors.': 'HFA4: Smanjiti podložne faktore rizika.', 'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'HFA5: Pojačati spremnost za katastrofe za efikasan odgovor na svim nivoima.', 'Hide': 'Sakrij', 'Hide Table': 'Sakrij tabelu', 'Hierarchy': 'Hijerarhija', 'Hierarchy Level 0 Name (i.e. Country)': 'Ime nultog hijerarhijskog nivoa (države)', 'Hierarchy Level 1 Name (e.g. State or Province)': 'Ime prvog nivoa hijerarhije (npr. savezna država/republika/pokrajina)', 'Hierarchy Level 2 Name (e.g. District or County)': 'Ime drugog nivoa hijerarhije (npr. kanton/regija)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Ime trećeg nivoa hijerarhije (npr. grad/opština/selo)', 'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Ime četvrtog nivoa hijerarhije (npr. susjedstvo/zaseok)', 'Hierarchy Level 5 Name': 'Ime petog nivoa hijerarhije', 'high': 'visoko', 'High': 'Visok', 'High Tide Depth': 'Dubina visoke plime', 'High Water': 'Najveći vodostaj', 'Highest Priority Open Requests': 'Najviši prioritet za otvorene zahtjeve', 'highly critical': 'Vrlo kritično', 'History': 'Istorija', 'Hit the back button on your browser to try again.': "Stisnite 'Nazad' na vašem pretraživaču da pokušte ponovo.", 'Holiday Address': 'Adresa za vrijeme odmora', 'Home': 'Početak', 'Home added': 'Kuća dodana', 'Home Address': 'Kućna adresa', 'Home City': 'Grad stanovanja', 'Home Country': 'Matična država', 'Home Crime': 'Kućni kriminal', 'Home deleted': 'Kuća izbrisana', 'Home Details': 'Kućni detalji', 'Home Phone': 'Kućni telefon', 'Home phone': 'Kućni telefon', 'Home Phone Number': 'Kućni telefon', 'Home Relative': 'Kućni srodnik', 'Home updated': 'Ažuriran dom', 'Homes': 'Kuće', 'horizontal': 'horizontalno', 'Hospital': 'Bolnica', 'Hospital Details': 'Pojedinosti bolnice', 'Hospital information 
added': 'Dodana informacija o bolnici', 'Hospital information deleted': 'Obrisana informacija o bolnici', 'Hospital information updated': 'Ažurirana informacija o bolnici', 'Hospital Management': 'Upravljanje bolnicom', 'Hospital status assessment.': 'Procjena stanja bolnice.', 'Hospital Status Report': 'Izvještaj o statusu bolnice', 'Hospitals': 'Bolnice', 'Host': 'Domaćin', 'Hot Spot': 'Kritična tačka', 'Hour': 'Sat', 'Hourly': 'Svaki sat', 'hourly': 'svaki sat', 'hours': 'sati', 'Hours': 'Sati', 'Hours added': 'Sati dodani', 'Hours by Program Report': 'Sati po programskom izvještaju', 'Hours by Role Report': 'Sati po izvještaju o ulogama', 'Hours deleted': 'Sati obrisani', 'Hours Details': 'Detalji sati', 'Hours updated': 'Sati ažurirani', 'Household kits received': 'Kompleti za domaćinstva primljeni', 'Household kits, source': 'Kućanski kompleti, izvor', 'households': 'domaćinstva', 'How data shall be transferred': 'Kako podaci trebaju biti preneseni', 'How did boys 13-17yrs spend most of their time prior to the disaster?': 'Kako su dječaci 13-17 god. provodili većinu vremena prije katastrofe?', 'How did boys <12yrs spend most of their time prior to the disaster?': 'Kako su dječaci <12 god. provodili većinu vremena prije katastrofe?', 'How did boys girls 13-17yrs spend most of their time prior to the disaster?': 'Kako su mladići i djevojke 13-17 godina provodili većinu vremena prije katastrofe', 'How did girls <12yrs spend most of their time prior to the disaster?': 'Kako su djevojčice <12 god. 
provodili većinu vremena prije katastrofe?', 'How do boys 13-17yrs spend most of their time now?': 'Kako mladići 13-17 godina sada provode većinu vremena?', 'How do boys <12yrs spend most of their time now?': 'Kako dječaci <12yrs sada provode većinu vremena?', 'How do girls 13-17yrs spend most of their time now?': 'Kako djevojke 13-17 godina sada provode većinu vremena?', 'How do girls <12yrs spend most of their time now?': 'Kako djevojčice <12yrs sada provode većinu vremena?', 'How does it work?': 'Kako ovo radi?', 'How is this person affected by the disaster? (Select all that apply)': 'Kako je osoba pogođena katastrofom? (Odaberite sve što se može primjeniti)', 'How local records shall be updated': 'Kako se lokalni zapisi trebaju ažurirati', 'How long will the food last?': 'Koliko dugo će hrana trajati?', 'How long will this water resource last?': 'Koliko dugo će ovaj resurs vode trajati?', 'How many Boys (0-17 yrs) are Dead due to the crisis': 'Koliko dječaka (starosti od 0 do 17 god) je mrtvo usljed trenutne krize', 'How many Boys (0-17 yrs) are Injured due to the crisis': 'Koliko dječaka (0-17 godina) je povrijeđeno zbog krize', 'How many Boys (0-17 yrs) are Missing due to the crisis': 'Koliko dječaka (0 - 17 godina) je nestalo tokom krize', 'How many days will the supplies last?': 'Koliko dana će trajati zalihe?', 'How many doctors in the health centers are still actively working?': 'Koliko ljekara u zdravstvenim centrima još aktivno radi?', 'How many Girls (0-17 yrs) are Dead due to the crisis': 'Koliko djevojaka (0-17godina) je umrlo tokom ove krize', 'How many Girls (0-17 yrs) are Injured due to the crisis': 'Koliko djevojki (0-17 god.) 
je ozlijeđeno zbog nepogode', 'How many Girls (0-17 yrs) are Missing due to the crisis': 'Koliko djevojčica (0-17 god) je nestalo uslijed krize', 'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': 'Koliko je kuća oštećeno ali još upotrebljivo (upotrebljivo = razbijeni prozori, pukotine u zidu, krov malo oštećen)?', 'How many latrines are available in the village/IDP centre/Camp?': 'Koliko zahoda je dostupan u selu/centru/kampu?', 'How many Men (18 yrs+) are Dead due to the crisis': 'Koliko muškaraca (18+ godina) je umrlo zbog krize', 'How many Men (18 yrs+) are Injured due to the crisis': 'Koliko muškaraca (preko 18 god.) je povrijeđeno usljed krize', 'How many Men (18 yrs+) are Missing due to the crisis': 'Koliko muškaraca (18 god+) je nestalo uslijed krize', 'How many midwives in the health centers are still actively working?': 'Koliko babica u zdravstvenim centrima još aktivno radi?', 'How many new cases have been admitted to this facility in the past 24h?': 'Koliko novih slučajeva je primljeno u ovaj objekat u posljednjih 24h?', 'How many nurses in the health centers are still actively working?': 'Koliko medicinskih sestara u zdravstvenim centrima još aktivno radi?', 'How many of the patients with the disease died in the past 24h at this facility?': 'Koliko pacijenata sa ovom bolesti je umrlo u posljednjih 24h u ovom objektu?', 'How many of the primary school age boys (6-12) in the area are not attending school?': 'Koliko dječaka osnovaca (6-12) u ovom području ne pohađaju školu?', 'How many of the primary school age girls (6-12) in the area are not attending school?': 'Koliko djevojčica osnovaca (6-12) u ovom području ne pohađaju školu?', 'How many of the secondary school age boys (13-18) in the area are not attending school?': 'Koliko srednjoškolskih mladića (13-18) u ovom području ne pohađaju školu?', 'How many of the secondary school age girls (13-18) in the area are not attending 
school?': 'Koliko srednjoškolskih djevojaka (13-18) u ovom području ne pohađaju školu?', 'How many patients with the disease are currently hospitalized at this facility?': 'Koliko pacijenata sa tom bolesti je trenutno hospitalizovano u ovom objektu?', 'How many primary school age boys (6-12) are in the affected area?': 'Koliko dječaka osnovaca (6-12) je u pogođenom području?', 'How many primary school age girls (6-12) are in the affected area?': 'Koliko djevojčica osnovaca (6-12) je u pogođenom području?', 'How many secondary school age girls (13-18) are in the affected area?': 'Koliko srednjoškolskih djevojaka (13-18) je u pogođenom području?', 'How many teachers have been affected by the disaster (affected = unable to work)?': 'Koliko je nastavnika pogođeno katastrofom? (pogođeno = ne može raditi)', 'How many teachers worked in the schools prior to the disaster?': 'Koliko je nastavnika radilo u školi prije katastrofe?', 'How many Women (18 yrs+) are Dead due to the crisis': 'Koliki broj Žena (od 18 godina i više) je poginulo tokom krize', 'How many Women (18 yrs+) are Injured due to the crisis': 'Koliko žena (18+ godina) je povrijeđeno uslijed krize', 'How many Women (18 yrs+) are Missing due to the crisis': 'Koliko žena (18 godina+) je nestalo uslijed krize', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Koliko detalja je vidljivo. Velik nivo zumiranja znači mnogo detalja, ali ne i široku oblast prikaza. Nizak nivo zumiranja znači prikaz široke oblasti, ali ne i visok nivo detalja.', 'How often you want to be notified. If there are no changes, no notification will be sent.': 'Koliko često želite biti obavještavani. 
Ako nema izmjena, napomene se neće slati.', 'How you want to be notified.': 'Kako želite biti obavještavani.', 'HR Manager': 'Menadžer ljudskih resursa', 'HTML class': 'HTML klasa', 'Human Resource': 'Ljudski resurs', 'Human Resource added': 'Ljudski resurs dodan', 'Human Resource assigned': 'Dodijeljeni ljudski resursi', 'Human Resource Assignment updated': 'Dodjela ljudskih resursa ažurirana', 'Human Resource Assignments': 'Dodjeljivanje ljudskih resursa', 'Human Resource Details': 'Detalji ljudskih resursa', 'Human Resource Management': 'Rukovodstvo ljudskim resursima', 'Human Resource removed': 'Ljudski resurs uklonjen', 'Human Resource unassigned': 'Nedodijeljeni ljudski resursi', 'Human Resource updated': 'Ažuriran ljudski resurs', 'Human Resources': 'Ljudski resursi', 'Human Resources Management': 'Upravljanje ljudskim resursima', 'Humanitarian NGO': 'Humanitarna NVO', 'Hungary': 'Mađarska', 'Hurricane': 'Uragan', 'Hurricane Force Wind': 'Vjetar snage uragana', 'Hybrid Layer': 'Hibridni sloj', 'Hygiene': 'Higijena', 'Hygiene kits received': 'Primljeni higijenski kompleti', 'Hygiene kits, source': 'Higijenski kompleti, izvor', 'Hygiene NFIs': 'Higijenski neprehrambeni artikli', 'Hygiene practice': 'higijenska praksa', 'Hygiene problems': 'Higijenski problemi', 'Hygiene promotion': 'Unapređenje higijene', 'I accept. Create my account.': 'Prihvatam. 
Kreiraj moj račun.', 'I agree to the %(terms_of_service)s': 'Slažem se sa %(terms_of_service)s', 'I am available in the following area(s)': 'Dostupan sam u sljedećim područjima', 'Ice Pressure': 'Pritisak leda', 'Iceberg': 'Santa leda', 'Iceland': 'Island', 'ICT': 'IKT', 'ID': 'IB', 'ID Label': 'ID oznaka', 'ID Label:': 'ID oznaka:', 'ID Tag': 'ID oznaka', 'ID Tag Number': 'Broj identifikacijske kartice', 'ID type': 'ID tip', 'Identificación de Víctimas de Desastres': 'Identifikacija žrtava katastrofe', 'Identification': 'Identifikacija', 'Identification label of the Storage bin.': 'Identifikacija korpe za smještaj', 'Identification Report': 'Izvještaj o identifikaciji', 'Identification Reports': 'Izvještaji identifikacija', 'Identification Status': 'Status identifikacije', 'identified': 'identificiran', 'Identified as': 'Identifikovano kao', 'Identified by': 'Identifikovan od strane', 'Identifier Name for your Twilio Account.': 'Ime identifikatora za vaš Twilio nalog.', 'Identifier which the remote site uses to authenticate at this site when sending synchronization requests.': 'Identifikator koji će udaljeni sajt koristiti za provjeru prijave na ovaj sajt kada šalje zahtjeve za sinhronizacijom.', 'Identities': 'Identiteti', 'Identity': 'Identitet', 'Identity added': 'Identitet dodan', 'Identity deleted': 'Identitet obrisan', 'Identity Details': 'Detalji o identitetu', 'Identity updated': 'Identitet ažuriran', 'IEC Materials': 'IEC materijali', 'If a ticket was issued then please provide the Ticket ID.': 'Ako je kartica izdata molimo vas da obezbijedite ID kartice', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Ako korisnik potvrdi da posjeduje e-mail adresu ove domene, polje odobravatelja će se koristiti da definira da li i od strane koga se traže daljnje potvrde.', 'If checked, the notification will contain all modified records. 
If not checked, a notification will be send for each modified record.': 'Ako je označeno, napomena će sadržati sve izmijenjene zapise. Ako nije označeno, napomena će biti poslana za svaki izmijenjeni zapis.', 'If it is a URL leading to HTML, then this will downloaded.': 'Ako URL vodi ka HTML-u, ovo će biti preuzeto.', 'If neither are defined, then the Default Marker is used.': 'Ako nijedan nije definisan, onda je korišten Podrazumjevani Znak', 'If no marker defined then the system default marker is used': 'Ako nema definisanog markera onda se koristi standardni marker sistema', 'If no, specify why': 'Ako ne, navedite zašto', 'If none are selected, then all are searched.': 'Ako nijedan nije označen, svi će biti pretraženi.', 'If not found, you can have a new location created.': 'Ako nije nađeno, možete kreirati novu lokaciju.', "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Ako je odabrano, lokacija ovog sredstva će biti ažurirana kada se kod ažurira lokacija osobe', 'If the location is a geographic area, then state at what level here.': 'Ako je lokacija geografsko područje, navedite na kojem je nivou.', 'If the person counts as essential staff when evacuating all non-essential staff.': 'Ako se osoba računa kao neohodno osoblje pri evakuaciji svog osoblja koje nije neophodno.', 'If the request is for %s, please enter the details on the next screen.': 'Ako je zahtjev za %s, unesite detalje na sljedećem ekranu.', 'If the request type is "Other", please enter request details here.': 'Ako je tip zahtjev "Drugi", unesite detalje zahtjeva ovdje.', 'If the service requries HTTP BASIC Auth (e.g. Mobile Commons)': 'Ako usluga zahtijeva HTTP BASIC Autorizaciju (npr. 
Mobile Commons)', 'If there are multiple configs for a person, which should be their default?': 'Ako ima više konfiguracija za jednu osobu, koja treba biti podrazumijevana?', "If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Ako je ova konfiguracija prikazana na GIS konfiguracijskom meniju, dajte ime da se koristi u meniju. Ime za ličnu konfiguraciju mape će se koristiti za korisničko ime.', "If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Ako ova konfiguracija predstavlja lokalitet za Izbornik Lokaliteta, postavite naziv da biste je koristili u izborniku. Kao naziv za ličnu konfiguraciju mape će biti postavljeno ime korisnika.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Ako je ovo polje popunjeno onda će korisniku koji specificira ovu organizaciju pri upisu biti osoblje organizacije osim ako se njegovo područje ne podudara sa područjem polja.', 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Ako je ovo polje popunjeno tada korisnik sa navedenom domenom će biti automatski dodijeljen kao osoblje ove organizacije', 'If this is a request template to be added repeatedly then the schedule can be set on the next page.': 'Ako je ovo predložak za zahtjev koji će se dodati iznova, onda se raspored može postaviti na sljedećoj strani', 'If this is set to True then mails will be deleted from the server after downloading.': 'Ako je ovo uključeno, tada će poruke elektronske pošte biti obrisane sa servera nakon preuzimanja', "If this is ticked, then this will become the user's Base Location & hence where 
the user is shown on the Map": 'Ako je ovo označeno, onda će ovo postati korisnikova osnovna lokacija i samim time lokacija na kojoj će korisnik biti prikazan na mapi.', 'If this record should be restricted then select which role is required to access the record here.': 'Ako bi ovaj zapis trebao biti ograničen, ovdje odaberite kojoj ulozi je dozvoljen pristup zapisu.', 'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Ako je ovaj zapis ograničen, označiti kojim ulogama je dozvoljen pristupovom zapisu', 'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': 'Ako je Unit = m, Base Unit = Km, tada je multiplikator is 0.0001 jer je 1m = 0.001 km.', 'If yes, specify what and by whom': 'Ako da, navedite šta i od strane koga', 'If yes, which and how': 'Ako jeste, koji i kako', 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Ako ne unesete odgovarajući dokument, vaš e-mail će biti prikazan kako mogli potvrditi ove podatke.', "If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": "Ukoliko aktivnost ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj aktivnost'.", "If you don't see the asset in the list, you can add a new one by clicking link 'Create Asset'.": "Ukoliko sredstvo ne vidite u popisu, možete dodati novo klikom na link 'Kreiraj sredstvo'.", "If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiaries'.": 'Ukoliko ne vidite korisnika u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj korisnika"', "If you don't see the campaign in the list, you can add a new one by clicking link 'Add Campaign'.": 'Ukoliko ne vidite kampanju u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj kampanju"', "If you don't see the Cluster in the list, you can add a new one by clicking link 'Add New Cluster'.": 
'Ukoliko ne vidite skup u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj novi skup"', "If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.": "Ukoliko zajednicu ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj zajednicu'.", "If you don't see the Hospital in the list, you can add a new one by clicking link 'Add Hospital'.": "Ukoliko bolnicu ne vidite u popisu, možete dodati novu klikom na link 'Dodaj bolnicu'.", "If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Ukoliko bolnicu ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj bolnicu'.", "If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": 'Ukoliko ne vidite lokaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Kreiraj lokaciju"', "If you don't see the Office in the list, you can add a new one by clicking link 'Add Office'.": "Ukoliko ne vidite ured na listi, možete dodati novi klikom na link 'Dodaj ured'", "If you don't see the Organization in the list, you can add a new one by clicking link 'Add Organization'.": 'Ukoliko ne vidite organizaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj Organizaciju"', "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Ukoliko ne vidite organizaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Kreiraj Organizaciju"', "If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": "Ukoliko projekt ne vidite u popisu, možete dodati novi klikom na link 'Kreiraj projekt'.", "If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": "Ukoliko ne vidite sektor na listi, možete dodati novi klikom na link 'Kreiraj sektor'", "If you don't see the Type in the list, you can add a new one by clicking 
link 'Add Region'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Dodaj region'", "If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip aktivnosti'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip objekta'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip kancelarije'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": 'Ukoliko ne vidite tip u popisu, možete dodati novi tako što ćete kliknuti na link "Kreiraj tip organizacije"', "If you don't see the vehicle in the list, you can add a new one by clicking link 'Add Vehicle'.": 'Ukoliko ne vidite vozilo u popisu, možete dodati novo tako što ćete kliknuti na link "Dodaj vozilo"', "If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": "Ako unesete ime direktorija, onda će se sloj pojaviti u tom direktoriju u prebacivaču sloja mape. 
Poddirektorij se može kreirati razdvajanjem imena s '/'", 'If you know what the Geonames ID of this location is then you can enter it here.': 'Ako znate koje je Geonames ime područja (ID) ove lokacije unesite ga ovdje', 'If you know what the OSM ID of this location is then you can enter it here.': 'Ako znate OSM ID ove lokacije, možete ga unijeti ovdje.', 'If you need to add a new document then you can click here to attach one.': 'Ako vam je potrebno da dodate novi dokument onda kliknite ovdje kako biste dodali jedan.', "If you specify a module then this will be used as the text in that module's index page": 'Ako navedete modul, ovo će se koristiti kao tekst u indeksnoj stranici modula', "If you specify a resource then this will be used as the text in that resource's summary page": 'Ako navedete resurs, ovo će se koristiti kao tekst u sumarnoj stranici tog resursa', 'If you want several values, then separate with': 'Ukoliko želite više vrijednosti, onda razdvojite sa', 'If you would like to help, then please': 'Ako zelite pomoći, samo izvolite', 'If you would like to help, then please %(sign_up_now)s': 'Ako želite pomoći, onda %(sign_up_now)s', 'ignore': 'zanemari', 'Ignore Errors?': 'Ignoriši greške?', 'Illegal Immigrant': 'Ilegalni doseljenik', 'Image': 'Slika', 'Image added': 'Slika dodana', 'Image deleted': 'Slika obrisana', 'Image Details': 'Detalji slike', 'Image File(s), one image per page': 'Datoteka (datoteke) slika, prikaz jedne slike po stranici', 'Image Tags': 'Oznake na slikama', 'Image Type': 'Vrsta slike', 'Image updated': 'Slika ažurirana', 'Image Upload': 'Postavi sliku', 'Image/Other Attachment': 'Slika/Drugi dodaci', 'Imagery': 'Lik', 'Images': 'Slike', 'Immediate reconstruction assistance, Rank': 'Pomoć u hitnoj rekonstrukciji, stepen', 'Immediately': 'Odmah', 'Immigration and Customs Capabilities': 'Carinske i imigracione mogućnosti', 'Impact added': 'Utjecaj dodat', 'Impact Assessments': 'Procjene utjecaja', 'Impact deleted': 'Utjecaj 
obrisan', 'Impact Details': 'Detalji utjecaja', 'Impact Type': 'Tip utjecaja', 'Impact Type added': 'Dodan tip utjecaja', 'Impact Type deleted': 'Tip utjecaja obrisan', 'Impact Type Details': 'Detalji o tipu utjecaja', 'Impact Type updated': 'Ažurirana vrsta utjecaja', 'Impact Types': 'Tip utjecaja', 'Impact updated': 'Utjecaj ažuriran', 'Impacts': 'Utjecaji', 'implanted': 'implantat', 'import': 'uvoz', 'Import': 'Uvoz', 'Import & Export Data': 'Uvoz i izvoz podataka', 'Import Activity Data': 'Uvezi podatke aktivnosti', 'Import Activity Type data': 'Uvezi podatke tipa aktivnosti', 'Import Airports': 'Uvezi aerodrome', 'Import and Export': 'Uvoz i izvoz', 'Import Annual Budget data': 'Uvezi podatke godišnje budžeta', 'Import Assets': 'Uvezi sredstva', 'Import Awards': 'Uvezi nagrade', 'Import Base Stations': 'Uvezi bazne stanice', 'Import Catalog Items': 'Uvezi stavke kataloga', 'Import Certificates': 'Uvezi certifikate', 'Import Community Data': 'Uvezi podatke zajednice', 'Import Completed Assessment Forms': 'Uvezi završen formular ocjene', 'Import Contacts': 'Uvezi kontakte', 'Import Courses': 'Uvezi kurseve', 'Import Data': 'Uvezi podatke', 'Import Data for Theme Layer': 'Uvezi podatke za tematskog sloja', 'Import Departments': 'Uvezi odjeljenja', 'Import Event Types': 'Uvezi tipove događaja', 'Import Facilities': 'Uvezi objekte', 'Import Facility Types': 'Uvezi vrste objekata', 'Import File': 'Uvezi datoteku', 'Import File deleted': 'Unosna datoteka izbrisana', 'Import File Details': 'Uvezi detalje datoteke', 'Import Files': 'Uvezi datoteke', 'Import from CSV': 'Uvezi iz CSV', 'Import from OpenStreetMap': 'Uvezi iz OpenStreetMap', 'Import from Ushahidi Instance': 'Importuj iz Ushahidi instance', 'Import Hazard data': 'Uvezi podatke o riziku', 'Import Hazards': 'Uvezi rizike', 'Import Heliports': 'Uvezi heliodrome', 'Import Hours': 'Uvezi sate', 'Import if Master': 'Uvezi ako je Master', 'Import Incident Reports': 'Uvezi izvještaje o incidentu', 'Import Incident 
Reports from Ushahidi': 'Uvezi izvještaj o incidentu iz Ushahidi', 'Import Incident Types': 'Uvezi tipove incidenta', 'Import Job': 'Uvezi posao', 'Import Job Count': 'Broj poslova uvoza', 'Import job created': 'Posao za uvoz kreiran', 'Import Jobs': 'Uvezi poslove', 'Import Layers': 'Uvezj slojeve', 'Import Location data': 'Uvezi podatke lokacije', 'Import Location Data': 'Uvezi podatke lokacije', 'Import Locations': 'Uvezi lokacije', 'Import Logged Time data': 'Uvezi zabilježene vremenske podatke', 'Import multiple tables as CSV': 'Uvoz više tabela kao CSV', 'Import New File': 'Uvezi novu datoteku', 'Import Offices': 'Uvezi kancelarije', 'Import Organizations': 'Uvezi organizacije', 'Import Participant List': 'Uvezi listu učesnika', 'Import Participants': 'Uvezi učesnike', 'Import Partner Organizations': 'Uvezi partnerske organizacije', 'Import PoI Types': 'Uvezi tipove tačaka interesa', 'Import Points of Interest': 'Uvezi tačke interesa', 'Import Policies & Strategies': 'Uvezi politiku ili strategiju', 'Import Posts': 'Uvezi blok ugradivog teksta', 'Import Project Organizations': 'Uvezi organizacije projekta', 'Import Projects': 'Uvezi projekte', 'Import Resource Types': 'Uvezi tipove resursa', 'Import Resources': 'Uvezi resurse', 'Import Seaports': 'Uvezi luke', 'Import Sector data': 'Uvezi podakte o sektoru', 'Import Series': 'Uvezi serije', 'Import Service data': 'Uvezi podatke usluge', 'Import Services': 'Uvezi usluge', 'Import Staff': 'Uvezi osoblje', 'Import Suppliers': 'Uvezi dobavljače', 'Import Tags': 'Uvezi oznake', 'Import Tasks': 'Uvezi zadatke', 'Import Template Layout': 'Uvezi raspored predložaka', 'Import Templates': 'Uvezi predloške', 'Import Theme data': 'Uvezi podatke teme', 'Import Training Events': 'Uvezi događaje obuke', 'Import Training Participants': 'Uvezi učesnike obuke', 'Import Users': 'Uvezi korisnike', 'Import Volunteer Cluster Positions': 'Uvezi pozicije skupa volontera', 'Import Volunteer Cluster Types': 'Uvezi tipove skup 
volontera', 'Import Volunteer Clusters': 'Uvezi skupove volontera', 'Import Volunteers': 'Uvezi volontere', 'Import Warehouse Stock': 'Uvezi zalihu skladišta', 'Import Warehouses': 'Uvezi skladišta', 'Import/Export': 'Uvoz/Izvoz', 'Import/Master': 'Uvezi/Master', 'Important': 'Važno', 'Importantly where there are no aid services being provided': 'Važnije gdje nije pružena pomoć', 'Imported': 'Uvezeno', 'Importing data from spreadsheets': 'Unošenje podataka iz tabela', 'Improper decontamination': 'Nepravilna dekontaminacija', 'Improper handling of dead bodies': 'Nepravilno postupanje sa mrtvim tijelima', 'improvement': 'poboljšanje', 'In': 'U', 'In Catalogs': 'U katalozima', 'in Deg Min Sec format': 'u Stepeni Minute Sekunde formatu', 'In error': 'Greška', 'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'U GeoServer, ovo je ime sloja. Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom nakon dvotačke(:).', 'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'U GeoServer, ovo je ime radnog prostora. 
Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom nakon dvotačke(:).', 'in GPS format': 'U GPS formatu', 'in Inv.': 'u Inv.', 'In Inventories': 'U zalihama', 'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': 'Da možete mijenjati OpenStreetMap podatke iz %(name_short)s, trebate registrovati nalog na OpenStreetMap serveru.', 'In Process': 'U procesu', 'In Progress': 'U Toku', 'In Stock': 'Na zalihi', 'in Stock': 'na zalihi', 'In transit': 'U prijelazu', 'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'U rasporedu prozora karta se maksimizira da popuni prozor , nema potrebe da se ovdje postavlja velika vrijednost.', 'inactive': 'neaktivno', 'Inbound Mail Settings': 'Podešavanja ulaznih mail-ova', 'InBox': 'Dolazna pošta', 'Incident': 'Slučaj', 'Incident added': 'Dodat incident', 'Incident Categories': 'Kategorije incidenata', 'Incident Commander': 'Komandir incidenata', 'Incident Details': 'Detalji o incidentu', 'Incident removed': 'Incident uklonjen', 'Incident Report': 'Izvještaj o incidentu', 'Incident Report added': 'Dodat izvještaj o incidentu', 'Incident Report deleted': 'Obrisan izvještaj o incidentu', 'Incident Report Details': 'Detalji o izvještaju incidenta', 'Incident Report removed': 'Uklonjen izvještaj o incidentu', 'Incident Report updated': 'Ažuriran izvještaj o incidentu', 'Incident Reporting': 'Izvještavanje o incidentu', 'Incident Reporting System': 'Sistem za izvještavanje o incidentima', 'Incident Reports': 'Izvještaji o incidentu', 'Incident Timeline': 'Vremenski tok incidenta', 'Incident Type': 'Tip incidenta', 'Incident Type added': 'Vrsta incidenta dodana', 'Incident Type Details': 'Detalji o vrsti incidenta', 'Incident Type removed': 'Tip incidenta obrisan', 'Incident Type updated': 'Vrsta incidenta ažurirana', 'Incident Types': 'Tipovi incidenta', 'Incident updated': 'Ažuriran incident', 
'Incidents': 'Incidenti', 'Include any special requirements such as equipment which they need to bring.': 'Uključite bilo koje posebne zahtjeve kao npr. opremu koju trebaju donijeti.', 'Include core files': 'Uključi osnovne datoteke', 'Include Entity Information?': 'Uključi informaciju o jedinki?', 'Include only items purchased within the specified dates.': 'Uključi samo stavke kupljene unutar navedenih datuma.', 'Include only items that expire within the specified dates.': 'Uključi samo stavke koje ističu unutar navedenih datuma.', 'Include only items where quantity is in this range.': 'Uključi samo stavke čija je količina unutar navedenog opsega.', "includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Uključuje GroundOverlay ili ScreenOverlay koji još nisu podržani u OpenLayers, tako da možda neće raditi ispravno.', 'Incoming': 'Dolazni', 'Incoming Shipment canceled': 'Dolazna pošiljka otkazana', 'Incoming Shipment updated': 'Dolazna pošiljka je ažurirana', 'Incoming Shipments': 'Dolazne pošiljke', 'Incomplete': 'Nepotpuno', 'Incorrect parameters': 'Nevažeći parametri', 'India': 'Indija', 'Individuals': 'Pojedinci', 'Indonesia': 'Indonezija', 'Industrial': 'Industrijska', 'Industrial Crime': 'Industrijski kriminal', 'Industry close to village/camp': 'Industrija u blizini sela/kampa', 'Industry Fire': 'Industrijska vatra', 'Infant (0-1)': 'Novorođenče (0-1)', 'Infectious Disease': 'Infektivne bolesti', 'Infectious Disease (Hazardous Material)': 'Zarazna bolest (Opasan materijal)', 'Infectious Diseases': 'Zarazne bolesti', 'Infestation': 'Napast', 'Informal camp': 'Neformalni kamp', 'Informal Leader': 'Neformalni vođa', 'Information gaps': 'Praznine u informacijama', 'Information Source': 'Izvor informacije', 'Infusion catheters available': 'Sonde za infuziju dostupne', 'Infusion catheters need per 24h': 'Infuzioni kateteri potrebni u 24h', 'Infusion catheters needed per 24h': 'Infuzijski kateteri 
potrebni po 24h', 'Infusions available': 'Dostupne infuzije', 'Infusions needed per 24h': 'Infuzija potrebna u 24h', 'Inherited?': 'Naslijeđeni?', 'initial assessment': 'Početna procjena', 'Initials': 'Inicijali', 'injured': 'povrijeđeni', 'Injuries': 'Povrede', 'input': 'ulaz', 'Input Job': 'Ulazni posao', 'insert new': 'Ubaci novi', 'insert new %s': 'dodaj novi %s', 'Inspected': 'Pregledano', 'Inspection Date': 'Datum Inspekcije', 'Inspection date and time': 'Datum i vrijeme inspekcije', 'Inspection time': 'Vrijeme inspekcije ili pregleda', 'Inspector ID': 'ID inspektora', 'Instance Type': 'Tip instance', 'Instance URL': 'URL instance', 'Instant Porridge': 'Instant kaša', "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Umjesto automatske sinhronizacije sa ostalih tačaka mreže, možete izvršiti sinhronizaciju preko datoteka, što je neophodno na mjestima gdje nema mreže. Možete koristiti ovu stranicu da uvezete sinhronizacijske podatke iz datoteka, kao i da izvezete podatke u sinhronizacijske datoteke. 
Kliknite na link desno da biste otišli na ovu stranicu.', 'Institution': 'Institucija', 'Instructor': 'Instruktor', 'Instrument Landing System': 'Instrumentalni sistem za slijetanje', 'Insufficient': 'Nedovoljno', 'insufficient number of pages provided': 'naveden nedovoljan broj strana', 'Insufficient Privileges': 'Nedovoljno ovlasti', 'Insufficient privileges': 'Nedovoljno ovlasti', 'Insufficient vars: Need module, resource, jresource, instance': 'Nedovoljan broj promjenjivih: potrebni su modul, resurs, jresurs, instanca', 'Insurance': 'Osiguranje', 'Insurance Renewal Due': 'Rok za obnovu osiguranja', 'Intake Items': 'Ulazne stavke', 'Intergovernmental': 'Međuvladina', 'Intergovernmental Organisation': 'Međuvladina organizacija', 'Intergovernmental Organization': 'Međuvladina Organizacija', 'Interior walls, partitions': 'Unutarnji zidovi, pregrade', 'Internal Features': 'Interne karakteristike', 'Internal Shipment': 'Interne pošiljke', 'Internal State': 'Unutrašnje stanje', 'International NGO': 'Međunarodna NVO', 'International Organization': 'Međunarodna organizacija', 'International Staff': 'Međunarodno osoblje', 'Intervention': 'Intervencija', 'Interview taking place at': 'intervju se održava u', 'invalid': 'neispravno', 'Invalid': 'Nevažeće', 'Invalid data: record %(id)s not accessible in table %(table)s': 'Pogrešni podaci: slog %(id)s nije dostupan u tabeli %(table)s', 'Invalid email': 'Neispravan email', 'Invalid form (re-opened in another window?)': 'Pogrešan formular (ponovo otvoren u drugom prozoru?)', 'Invalid Location!': 'Pogrešna lokacija!', 'Invalid Organisation ID!': 'Neispravan ID organizacije.', 'Invalid Organization ID!': 'Neispravan organizacijski ID.', 'Invalid phone number': 'Netačan broj telefona', 'Invalid phone number!': 'Pogrešan broj telefona!', 'Invalid Query': 'Pogrešan upit', 'invalid request': 'nevažeći zahtjev', 'Invalid request!': 'Nevažeći zahtjev!', 'Invalid Site!': 'Pogrešno mjesto!', 'Invalid ticket': 'Nevažeća kartica', 'invalid 
ticket': 'nevažeća kartica', 'Invalid UUID!': 'Nevažeći JMBG!', 'Inventories': 'Zalihe', 'Inventories with Item': 'Skladišta s stavkama', 'Inventories with Items': 'Skladišta s stavkama', 'Inventory': 'Skladište', 'Inventory Adjustment': 'Prilagođenje skladišta', 'Inventory Adjustment Item': 'Prilagođenje artikala u skladištu', 'Inventory functionality is available for:': 'Funkcionalnost skladišta je dostupna za', 'Inventory Item': 'Stavka - Inventar (popis)', 'Inventory Item added': 'Dodana stavka inventara', 'Inventory Item deleted': 'Stavka skladišta obrisana', 'Inventory Item Details': 'Popis detalja artikala u skladištu', 'Inventory Item updated': 'Ažurirana stavka inventara', 'Inventory Items': 'Stavke skladišta', 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Stavke skladišta uključuju prehrambene artikle kao i one koji će biti pretvoreni u sredstva na njihovim odredištima.', 'Inventory Location': 'Lokacija skladišta', 'Inventory Management': 'Upravljanje zalihama', 'Inventory of Effects': 'Inventar efekata', 'Inventory Stock Position': 'Pozicija zaliha inventara', 'Inventory Store added': 'Dodana stavka inventara', 'Inventory Store Details': 'Popis detalja artikala u skladištu', 'Inventory/Ledger': 'Skladište/knjigovodstvo', 'Iraq': 'Irak', 'Ireland': 'Irska', 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'je centralno online skladište gdje se mogu čuvati informacije o svim žrtvama nesreće i porodicama, posebno identificiranim gubicima, evakuisanim i raseljenim osobama. 
Informacije poput imena, godina, kontakt telefona, broja lične karte, trenutnog mjesta boravka i drugih detalja su pohranjene. Slike i otisci prstiju ljudi se mogu učitati u sistem. Ljudi se mogu rasvrstavati po grupama zbog efikasnosti i pogodnosti.', 'Is adequate food and water available for these institutions?': 'Da li je dostupna adekvatna hrana i voda za ove institucije?', 'Is editing level L%d locations allowed?': 'Da li je uređivanje nivoa L%d lokacija dopušteno?', 'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'Predviđa se da se sastoji od nekoliko pod-modula koji rade zajedno kako bi osigurali složenu funkcionalnost uz pomoć kojih će organizacije lakše upravljati olakšanjima i projektnim predmetima. 
To uključuje sistem za unos, sistem za upravljanje skladištima, praćenje proizvoda, upravljanje lancem opskrbe, upravljanje voznim parkom, nabavka, financijsko praćenje i druge sposobnosti za upravljanje sredstvima i resursima.', 'Is it safe to collect water?': 'Da li je bezbjedno sakupljanje vode?', 'Is this a strict hierarchy?': 'Da li je ovo stroga hijerarhija?', 'Israel': 'Izrael', 'Issued without Record': 'Izdato bez zapisa', 'Issuing Authority': 'Autoritet (odgovorno lice) za dodjeljivanje resursa', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Snima ne samo dio gdje su aktivni ,već također snima podatke u dometu projekata koji pružaju u svakom području.', 'It gives four options: No Sync, Newer Timestamp, Keep All, Replace All': 'Pruža četiri mogućnosti: Bez sinhronizacije, Novija vremenska oznaka, Zarži sve, Zamijeni sve', 'It is built using the Template agreed by a group of NGOs working together as the': 'Izgrađeno koristeći šablon usaglašen od strane grupe NVO radeći zajedno kao', 'Italian': 'Talijanski', 'Italy': 'Italija', 'Item': 'Stavka', 'Item added': 'Stavka dodana', 'Item added to Inventory': 'Stavka je dodana u inventar', 'Item Added to Shipment': 'Dodana Stavka za Pošiljku.', 'Item added to shipment': 'Predmet dodan u pošiljku', 'Item added to stock': 'Stavka dodana u zalihu', 'Item already in budget!': 'Stavka je već u budžetu !', 'Item already in Bundle!': 'Stavka već u paketu!', 'Item already in Kit!': 'Stavka već u kompletu!', 'Item Catalog added': 'Dodata stavka u katalog', 'Item Catalog Categories': 'Kategorije kataloga stavki ', 'Item Catalog Category': 'Kategorija kataloga stavki', 'Item Catalog Category added': 'Dodana kategorija kataloga stavki', 'Item Catalog Category deleted': 'Obrisana kategorija kataloga stavki', 'Item Catalog Category Details': 'Detalji kategorije kataloga stavke', 'Item Catalog Category updated': 'Kategorija 
kataloga stavke ažurirana', 'Item Catalog deleted': 'Katalog stavki izbrisan', 'Item Catalog Details': 'Pojedinosti o katalogu stavki', 'Item Catalog updated': 'Katalog stavki je ažurirana', 'Item Catalogs': 'Katalozi stavki', 'Item Categories': 'Kategorije stavki', 'Item Category': 'Kategorija stavki', 'Item Category added': 'Dodata kategorija stavki', 'Item Category deleted': 'Obrisana kategorija stavki', 'Item Category Details': 'Detalji o kategorijama stavki', 'Item Category updated': 'Ažurirana kategorija stavki', 'Item Code': 'Šifra stavke', 'Item deleted': 'Stavka obrisana', 'Item Details': 'Detalji o predmetu', 'Item name': 'Ime stavke', 'Item Pack added': 'Paket stavki je dodan', 'Item Pack deleted': 'Paket stavki je obrisan', 'Item Pack Details': 'Sadržaj paketa', 'Item Pack updated': 'Paket stavki je ažuriran', 'Item Packet added': 'Dodat paket stavki', 'Item Packet deleted': 'Paket sa stavkama obrisan', 'Item Packet Details': 'Detalji sadržaja paketa', 'Item Packet updated': 'Ažuriran paket stavki', 'Item Packets': 'Paketi stavki', 'Item Packs': 'Paketi sa stavkama', 'Item quantity adjusted': 'Prilagođena količina stavke', 'Item removed from Inventory': 'Stavka uklonjena iz inventara', 'Item Status': 'Status stavke', 'Item Sub-Categories': 'Pod-kategorije stavke', 'Item Sub-Category': 'Podkategorija stavke', 'Item Sub-Category deleted': 'Obrisana podkategorija stavki', 'Item Sub-Category Details': 'Detalji podkategorije stavke', 'Item Sub-Category updated': 'Podkategorija stavke ažurirana', 'Item Tracking Status': 'Status praćenja stavke', 'Item updated': 'Stavka ažurirana', 'Item(s) added to Request': 'Stavke dodane u zahtjev', 'Item(s) deleted from Request': 'Stavke obrisane iz zahtjeva', 'Item(s) updated on Request': 'Stavke ažurirane u zahtjev', 'Item/Description': 'Stavka/opis', 'Items': 'Stavke', 'Items in Category are Vehicles': 'Stavke u kategoriji su vozila', 'Items in Category can be Assets': 'Stavke u kategoriji mogu biti sredstva', 'Items in 
Request': 'Stavke u zahtjevu', 'Items in Stock': 'Stavke u zalihi', 'Items/Description': 'Stavke/opis', 'Jamaica': 'Jamajka', 'Japanese': 'Japanski', 'Jerry can': 'Jerry može', 'Jew': 'Jevrej', 'Jewish': 'Jevrejski', 'JNAP Priorities': 'JNAP Prioriteti', 'JNAP-1: Strategic Area 1: Governance': 'JNAP-1: Strateško područje 1: Vlada', 'JNAP-2: Strategic Area 2: Monitoring': 'JNAP-2: Strateško područje 2: Praćenje', 'JNAP-3: Strategic Area 3: Disaster Management': 'JNAP-3: Strateško područje 3: Upravljanje u katastrofama', 'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation': 'JNAP-4: Strateško područje 4: Smanjenje rizika i prilagođenje na promjene klime', 'Job added': 'Posao dodan', 'Job deleted': 'Posao obrisan', 'Job reactivated': 'Posao ponovo aktiviran', 'Job Role': 'Radno mjesto', 'Job Role added': 'Dodana uloga posla', 'Job Role Catalog': 'Katalog radnih mjesta', 'Job Role deleted': 'Pozicija obrisana', 'Job Role Details': 'Opis uloge posla', 'Job Role updated': 'Ažurirana uloga posla', 'Job Roles': 'Radno mjesto', 'Job Schedule': 'Raspored poslova', 'Job Title': 'Radno mjesto', 'Job Title added': 'Radno mjesto dodano', 'Job Title Catalog': 'Katalog radnih mjesta', 'Job Title deleted': 'Radno mjesto obrisano', 'Job Title Details': 'Detalji radnog mjesta', 'Job Title updated': 'radnog mjesto ažurirano', 'Job updated': 'Posao ažuriran', 'Jobs': 'Poslovi', 'joining': 'spajanje', 'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only': 'Udruženi državni akcioni plan za upravljanje rizicima u slučaju katastrofe i prilagođenju na klimatske promjene. 
Primjenjivo samo na Kukova ostrva', 'Journal': 'Dnevnik', 'Journal entry added': 'Unos u dnevnik dodan', 'Journal entry deleted': 'Unos u dnevnik obrisan', 'Journal Entry Details': 'Detalji stavke žurnala', 'Journal entry updated': 'Unos u dnevnik ažuriran', 'JS Layer': 'JS sloj', 'Just Once': 'Samo jednom', 'Kazakhstan': 'Kazahstan', 'Keep All': 'Zadrži sve', 'Keep Duplicate': 'Sačuvaj duplikat', 'Keep Local': 'Zadrži lokalne', 'Keep Original': 'Sačuvaj original', 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'prati sve dolazne kartice dopuštajući im da se kategoriziraju i preusmjere na odgovarajuća mjesto za dalju akciju.', 'keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'prati sve humanitarne organizaije koje djeluju u regionu katastrove. Snima ne samo mjesta gdje su aktivne ,već također snima informacije o opsegu projekata koje one provode u svakom području.', 'Kenya': 'Kenija', 'Key': 'Ključ', 'Key added': 'Ključ dodan', 'Key deleted': 'Obrisan Ključ', 'Key Details': 'Ključni detalji', 'Key updated': 'Kljuc ažuriran', 'Key Value pairs': 'Parovi ključ-vrijednost', 'Keys': 'Ključevi', 'Keyword': 'Ključna riječ', 'Keyword Added': 'Ključna riječ dodana', 'Keyword Deleted': 'Ključna riječ obrisana', 'Keyword Updated': 'Ključna riječ ažurirana', 'Keywords': 'Ključne riječi', 'kit': 'komplet', 'Kit': 'Komplet', 'Kit added': 'Komplet je dodan', 'Kit canceled': 'Komplet otkazan', 'Kit Contents': 'Sadržaj kompleta', 'Kit Created': 'Komplet kreiran', 'Kit deleted': 'Komplet obrisan', 'Kit Details': 'Detalji kompleta', 'Kit Item': 'Stavka kompleta', 'Kit Items': 'Stavke kompleta', 'Kit Updated': 'Komplet je ažuriran', 'Kit updated': 'Komplet je ažuriran', 'Kit?': 'Komplet?', 'Kits': 'Kompleti', 'Kitting': 'Pakovanje', 'KML 
Layer': 'KML sloj', 'Known Identities': 'Poznate ličnosti', 'Known incidents of violence against women/girls': 'Poznati incidenti nasilja nad ženama/djevojkama', 'Known incidents of violence since disaster': 'Poznati slučajevi nasilja od katastrofe', 'Known Locations': 'Poznate lokacije', 'Korea, North': 'Sjeverna Koreja', 'Korea, South': 'Južna Koreja', 'Korean': 'Korejski', 'KPIs': 'KPI', 'Kuwait': 'Kuvajt', 'Kyrgyzstan': 'Kirgistan', 'Label': 'Oznaka', 'Lack of material': 'Nedostatak materijala', 'Lack of school uniform': 'Nedostatak školske uniforme', 'Lack of supplies at school': 'Nedostatak zaliha u školi', 'Lack of transport to school': 'Nedostatak prevoza ka školi', 'Lactating women': 'Dojilje', 'Ladder Vehicle 30': 'Platformsko vozilo 30', 'Landslide': 'Klizište', 'Language': 'Jezik', 'Language Code': 'Šifra jezika', 'large': 'širok', 'Last': 'Zadnje', 'Last Checked': 'Zadnja provjera', 'Last Contacted': 'Zadnji kontakt', 'Last Downloaded': 'Zadnje preuzimanje', 'Last known location': 'Posljednja poznata lokacija', "Last Month's Work": 'Rad u zadnjem mjesecu', 'Last Name': 'Prezime', 'Last name': 'Prezime', 'Last Polled': 'Zadnje pregledanje', 'Last Pull': 'Zadnje povlačenje', 'Last pull on': 'Povučeno zadnji put', 'Last Push': 'Zadnje guranje', 'Last push on': 'Gurnuto zadnji put', 'Last run': 'Posljednje pokretanje', 'Last status': 'Zadnji status', 'Last synchronization on': 'Sinhronizovano zadnji put', 'Last synchronization time': 'Vrijeme posljednje sinhronizacije', 'Last updated': 'Zadnji put ažurirano', 'Last updated by': 'Zadnji put ažurirao', 'Last updated on': 'Zadnji put ažurirano', "Last Week's Work": 'Zadnja radna sedmica', 'Latest Information': 'Posljednja informacija', 'Latitude': 'Geografska širina', 'Latitude & Longitude': '(geografska) Dužina i Širina', 'Latitude and Longitude are required': 'Potrebne geografska širina i dužina', 'Latitude is Invalid!': 'Geografska širina nije ispravna!', 'Latitude is North - South (Up-Down).': 'Geografska 
širina je sjever-jug (gore-golje)', 'Latitude is North-South (Up-Down).': 'Geografska širina je sjever-jug(gore-dolje)', 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Geografska širina se mjeri od sjevera ka jugu (gore-dolje). Geografska širina je nula na ekvadoru, pozitivna na sjevernoj hemisferi i negativna na južnoj.', 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Geografska širina je nula na ekvadoru, pozitivna na sjevernoj hemisferi i negativna na južnoj.', 'Latitude must be between -90 and 90.': 'Geografska širina mora biti između -90 i 90', 'Latitude of far northern end of the region of interest.': 'Geografska širina krajnjeg sjevernog kraja regije', 'Latitude of far southern end of the region of interest.': 'Geografska širina je daleko od južnog kraja interesnog područja', 'Latitude of Map Center': 'Geografska širina od centra karte', 'Latitude should be between': 'Geografska širina treba da bude između', 'latrines': 'zahodi', 'Latrines': 'Zahodi', 'Latvia': 'Latvija', 'Law enforcement, military, homeland and local/private security': 'Organi provođenja zakona, vojska, državna i lokalna/privatna sigurnost', 'Layer': 'Sloj', 'Layer added': 'Dodan sloj', 'Layer deleted': 'Obrisan sloj', 'Layer Details': 'Detalji sloja', 'Layer has been Disabled': 'Sloj je onemogućen', 'Layer has been Enabled': 'Sloj je omogućen', 'Layer ID': 'ID Nivoa', 'Layer Name': 'Ime sloja', 'Layer Properties': 'Svojstva sloja', 'Layer removed from Symbology': 'Sloj uklonjen iz značenja simbola', 'Layer Type': 'Tip sloja', 'Layer updated': 'Ažuriran sloj', 'Layers': 'Slojevi', 'Layers updated': 'Slojevi ažurirani', 'Layout': 'Raspored', 'Lead Implementer': 'Vodeći realizator', 'Lead Implementer for this project is already set, please choose another role.': 'Glavni implementator za ovaj projekt je već postavljen, 
molim izaberite drugu ulofz.', 'Lead Organization': 'Vodeća organizacija', 'Leader': 'Vođa', 'Leave blank to request an unskilled person': 'Ostavi prazno za zahtjev za nekvalificiranom osobom', 'leave empty to detach account': 'ostavite prazno da odvojite račun', 'Lebanon': 'Liban', 'left': 'lijevo', 'Left-side is fully transparent (0), right-side is opaque (1.0).': 'Lijeva strana je potpuno prozirno (0), desna strana je neprovidno (1.0).', 'Left-to-Right': 'Sa lijeva na desno', 'Legend': 'Legenda', 'Legend Format': 'Format legende', 'legend URL': 'opis URL', 'Legend URL': 'URL legende', 'Length': 'Dužina', 'Length (m)': 'Dužina (m)', 'Lesotho': 'Lesoto', 'less': 'manje', 'Less Options': 'Manje opcija', 'Level': 'Nivo', 'Level 1': 'Nivo 1', 'Level 1 Assessment added': 'Procjena razine 1 dodana', 'Level 1 Assessment deleted': 'Izbrisana procjena nivoa 1', 'Level 1 Assessment Details': 'Detalji nivoa 1 procjene', 'Level 1 Assessment updated': 'Procjena prvog stepena ažurirana', 'Level 1 Assessments': 'procjena nivoa1', 'Level 2': 'Nivo 2', 'Level 2 Assessment added': 'Dodat nivo procjene 2.', 'Level 2 Assessment deleted': 'Procjena Nivoa 2 obrisana', 'Level 2 Assessment Details': 'Detalji procjene nivoa 2', 'Level 2 Assessment updated': 'Procjena drugog nivoa ažurirana', 'Level 2 Assessments': 'Procjena nivoa 2', 'Level 2 or detailed engineering evaluation recommended': 'Preporučuje se nivo 2 ili procjena izvedbenog projekta', 'Level 3': 'Nivo 3', "Level is higher than parent's": 'Nivo je veći nego kod roditelja', 'Level of Award': 'Nivo nagrade', 'Level of competency this person has with this skill.': 'Nivo sposobnosti koju ova osoba ima s tom vještinom.', 'Liberia': 'Liberija', 'Library support not available for OpenID': 'Podrška za biblioteku nije dostupna za OpenID', 'Libya': 'Libija', 'LICENCE': 'DOZVOLA', 'LICENSE': 'DOZVOLA', 'License Number': 'Broj dozvole', 'License Plate': 'Registarske tablice', 'Liechtenstein': 'Lihtenštajn', 'light': 'lagane', 'Lighting': 
'Osvjetljenje', 'Line': 'Linija', 'LineString': 'Žica', 'Link': 'Veza', 'Link (or refresh link) between User, Person & HR Record': 'Veza (ili osvježena veza) između korisnika, osobe i zapisa o ljudskim resursima', 'Link an Item & Shipment': 'Poveži predmet i pošiljku', 'Link for the RSS Feed.': 'Veza na RSS dovod', 'Link Item & Shipment': 'Poveži predmet i pošiljku', 'Link to this result': 'Veza na ovaj link', 'Links': 'Veze', 'Lips, Shape': 'Usne, oblik', 'List': 'Spisak', 'List %(site_label)s Status': 'Prikaži %(site_label)s status', 'List / Add Baseline Types': 'Popis / Dodaj vrste referentnih vrijednosti', 'List / Add Impact Types': 'Izlistaj/Dodaj tipove utjecaja', 'List / Add Services': 'Izlistaj / Dodaj usluge', 'List / Add Types': 'Izlistaj / Dodaj Tipove', 'List Activities': 'Prikaži aktivnosti', 'List Activity Organizations': 'Prikaži organizacije aktivnosti', 'List Activity Reports': 'Prikaži izvještaje o aktivnostima', 'List Activity Types': 'Prikaži tipove aktivnosti', 'List Addresses': 'Prikaži adrese', 'List Affiliations': 'Prikaži namještenja', 'List Airports': 'Prikaži aerodrome', 'List all': 'Prikaži sve', 'List All': 'Prikaži sve', 'List All Assets': 'Navedite sva sredstva', 'List All Catalog Items': 'Lista svih stavki kataloga', 'List All Catalogs & Add Items to Catalogs': 'Prikaži sve kataloge i dodaj stavke u kataloge', 'List All Commitments': 'Prikaži sva zaduženja', 'List all Entries': 'Prikaži sve unose', 'List All Entries': 'Prikaži sve unose', 'List All Group Memberships': 'Prikaži svo članstvo grupa', 'List All Item Categories': 'Prikaži kategorije stavki', 'List All Memberships': 'Izlistaj sva članstva', 'List All Organization Approvers & Whitelists': 'Prikaži sve potvrđivače u organizaciji i bijele liste', 'List All Received Shipments': 'Izlistaj sve primljene pošiljke', 'List All Records': 'Izlistaj sve zapise', 'List All Reports': 'Prikaži sve izvještaje', 'List All Requested Items': 'Prikaži sve zahtijevane stavke', 'List All 
Requested Skills': 'Prikaži sve tražene vještinee', 'List All Requests': 'Prikaži sve zahtjeve', 'List All Roles': 'Prikaži sve uloge', 'List All Sent Shipments': 'Izlistaj sve poslane pošiljke', 'List All Users': 'Prikaži sve korisnike', 'List All Vehicles': 'Prikaži sva vozila', 'List Alternative Items': 'Prikaži alternativne stavke', 'List Annual Budgets': 'Prikaži godišnje budžete', 'List Assessment Answers': 'Prikaži odgovore ocjene', 'List Assessment Questions': 'Prikaži pitanja ocjene', 'List Assessment Summaries': 'Izlistaj sažetke procjena', 'List Assessment Templates': 'Prikaži predloške ocjene', 'List Assessments': 'Popis procjenea', 'List Assets': 'Prikaži sredstva', 'List Assigned Human Resources': 'Prikaži dodijeljene ljudske resurse', 'List Availability': 'Pregled dostupnih', 'List available Scenarios': 'Izlistaj dostupne scenarije', 'List Awards': 'Prikaži nagrade', 'List Base Stations': 'Prikaži bazne stanice', 'List Baseline Types': 'Lista tipova referentnih tačaka', 'List Baselines': 'Prikaži referentne tačke', 'List Beneficiaries': 'Prikaži korisnike', 'List Beneficiary Types': 'Prikaži tipove korisnika', 'List Body Finds': 'Prikaži nađena tijela', 'List Branch Organizations': 'Prikaži ogranke organizacije', 'List Brands': 'Prikaži proizvođačke marke', 'List Budgets': 'Izlistaj budžete', 'List Bundles': 'Ispiši pakete', 'List Camp Services': 'Prikaži usluge kampa', 'List Camp Statuses': 'Prikaži statuse kampa', 'List Camp Types': 'Prikaži tipove kampava', 'List Campaign Messages': 'Prikaži poruke kampanje', 'List Campaigns': 'Prikaži kampanje', 'List Camps': 'Prikaži kampove', 'List Cases': 'Prikaži slučajeve', 'List Catalog Items': 'Prikaži stavke kataloga', 'List Catalogs': 'Prikaži kataloge', 'List Category<>Sub-Category<>Catalog Relation': 'Prikaz Kategorija<>Podkategorija<>Kataloški odnos', 'List Certificates': 'Prikaži certifikate', 'List Certifications': 'Prikaži certifikacije', 'List Checklists': 'Prikaži liste zadataka', 'List Cluster 
Subsectors': 'Izlistaj podsektore skupa', 'List Clusters': 'Prikaži grupisanja', 'List Coalitions': 'Prikaži koalicije', 'List Commitment Items': 'Prikaži stavke zaduženja', 'List Commitments': 'Prikaži zaduženja', 'List Committed People': 'Prikaži zadužene ljude', 'List Communities': 'Prikaži zajednice', 'List Community Contacts': 'Prikaži kontakt podatke zajednice', 'List Competencies': 'Popis Kompetencija', 'List Competency Ratings': 'Prikaži ocjene sposobnosti', 'List Completed Assessment Forms': 'Prikaži završene formulare ocjene', 'List Configs': 'Prikaži konfiguracije', 'List Conflicts': 'lista sukoba', 'List Contact Information': 'Prikaži kontaktne informacije', 'List Contacts': 'Prikaži kontakte', 'List Course Certicates': 'Ispiši certifikovane kurseve', 'List Course Certificates': 'Prikaži certifikate kursa', 'List Courses': 'Prikaži kurseve', 'List Credentials': 'Prikaži akreditive', 'List Current': 'Prikaži trenutne', 'List Data in Theme Layer': 'Prikaži podatke iz tematskog sloja', 'List Departments': 'Prikaži odjeljenja', 'List Details': 'Prikaži detalje', 'List Disaster Assessments': 'Prikaži procjene katastrofe', 'List Distribution Items': 'Prikaži stavke raspodjele', 'List Distributions': 'Prikaži raspodjele', 'List Documents': 'Prikaži dokumente', 'List Donations': 'Prikaži donacije', 'List Donors': 'Prikaži donatore', 'List Education Details': 'Prikaži podatke o obrazovanju', 'List Education Levels': 'Prikaži nivoe obrazovanja', 'List Event Types': 'Prikaži tipove događaja', 'List Events': 'Prikaži događaje', 'List Facilities': 'Prikaži objekte', 'List Facility Types': 'Prikaži vrstw objekata', 'List Feature Classes': 'Izlistaj klase karakteristika', 'List Feature Groups': 'Prikaži grupe karakteristika', 'List Feature Layers': 'Prikaži slojeve karakteristika', 'List Finds': 'Lista pronalaženja', 'List Flood Reports': 'Izlistaj izvještaje o poplavama', 'List Found People': 'Lista nađenih ljudi', 'List GPS data': 'Prikaži GPS podatke', 'List 
Groups': 'Prikaži grupe', 'List Groups/View Members': 'Izlistaj Grupe/Pogledaj Članove', 'List Hazards': 'Prikaži rizike', 'List Heliports': 'Prikaži heliodrome', 'List Homes': 'Izlistaj domove', 'List Hospitals': 'Prikaži bolnice', 'List Hours': 'Prikaži sate', 'List Human Resources': 'Prikaži ljudske resurse', 'List Identities': 'Prikaži identitete', 'List Images': 'Prikaži slike', 'List Impact Assessments': 'Izlistaj procjene utjecaja', 'List Impact Types': 'Popis vrsta utjecaja', 'List Impacts': 'Nabroji utjecaje', 'List Import Files': 'Ispiši uvezene datoteke', 'List Incident Reports': 'Prikaži izvještaje o incidentu', 'List Incident Types': 'Prikaži tipove incidenta', 'List Incidents': 'Prikaži incidente', 'List Item Catalog Categories': 'Prikaži kategorije stavki kataloga', 'List Item Catalogs': 'Prikaži stavki...', 'List Item Categories': 'Prikaži kategorije stavki', 'List Item Packets': 'Prikaz paketa stavki', 'List Item Packs': 'Prikaži pakete stavki', 'List Item Sub-Categories': 'Prikaži podkategorije stavki', 'List Items': 'Prikaži stavke', 'List Items in Inventory': 'Ispiši stavke u inventaru', 'List Items in Request': 'Prikaži stavke u zahtjevu', 'List Items in Stock': 'Prikaži stavku u zalihi', 'List Job Roles': 'Izlistaj poslovne uloge', 'List Job Titles': 'Prikaži radna mjesta', 'List Jobs': 'Prikaži poslove', 'List Keys': 'Lista ključeva', 'List Keywords': 'Prikaži ključne riječi', 'List Kits': 'Prikaži komplete', 'List Layers': 'Prikaži slojeve', 'List Layers in Profile': 'Prikaži slojeve u profilu', 'List Layers in Symbology': 'Prikaži značenja simbola', 'List Level 1 assessments': 'Izlistaj procjene 1. 
Nivoa', 'List Level 1 Assessments': 'Prikaži procjene nivoa 1', 'List Level 2 Assessments': 'Ispiši procjene drugog nivoa', 'List Level 2 assessments': 'Ispiši procjene nivoa 2', 'List Location Hierarchies': 'Prikaži hijerarhije lokacija', 'List Locations': 'Prikaži lokacije', 'List Log Entries': 'Prikaži unose zapisnika', 'List Logged Time': 'Prikaži bilježena vremena', 'List Mailing Lists': 'Prikaži liste za slanje poruka', 'List Map Profiles': 'Prikaži konfiguracije mape', 'List Markers': 'Prikaži markere', 'List Members': 'Prikaži članove', 'List Memberships': 'Prikaži članstva', 'List Messages': 'Prikaži poruke', 'List Milestones': 'Prikaži prekretnice', 'List Missing People': 'Lista nestalih ljudi', 'List Missing Persons': 'Prikaži nedostajuće osobe', 'List Missions': 'Prikaži misije', 'List Morgues': 'Kreiraj mrtvačnice', 'List Need Types': 'Prikaži vrste potreba', 'List Needs': 'Lista potreba', 'List Networks': 'Prikaži mreže', 'List of addresses': 'Lista adresa', 'List of Appraisals': 'Prikaži ispunjenja', 'List of CSV files': 'Lista CSV datoteka', 'List of CSV files uploaded': 'Spisak ucitanih CSV(comma separated value) datoteka', 'List of Facilities': 'Prikaži objekte', 'List of Items': 'Potpis predmeta', 'List of Missing Persons': 'Lista osoba koje su nestale', 'List of Peers': 'Lista saradnika', 'List of Professional Experience': 'Prikaži profesionalna iskustva', 'List of Reports': 'Lista Izvještaja', 'List of Requests': 'Lista zahtjeva', 'List of Roles': 'Prikaži uloge', 'List of Spreadsheets': 'Lista proračunskih tablica', 'List of Spreadsheets uploaded': 'Lista poslanih tablica', 'List of Volunteers': 'Lista volontera', 'List of Volunteers for this skill set': 'Lista volontera za ovu skupinu vještina', 'List of Volunteers for this skills set': 'Lista volontera za ovu skupinu vještina', 'List Office Types': 'Prikaži tipove kancelarija', 'List Offices': 'Prikaži kancelarije', 'List Orders': 'Prikaži narudžbe', 'List Organisations': 'Prikaži 
organizacije', 'List Organization Domains': 'Prikaži domene organizacije', 'List Organization Types': 'Prikaži tipove organizacije', 'List Organizations': 'Prikaži organizacije', 'List Outputs': 'Prikaži izlaze', 'List Participants': 'Prikaži učesnike', 'List Partner Organizations': 'Prikaži partnerske organizacije', 'List Partners': 'Lista partnera', 'List Patients': 'Lista pacijenata', 'List Peers': 'Popis saradnika', 'List Personal Effects': 'Kreiraj lične uticaje', 'List Persons': 'Prikaži osobe', "List Persons' Details": 'Prikaži detalje o osobama', 'List Photos': 'Prikaži fotografije', 'List PoI Types': 'Prikaži tipove tačaka interesa', 'List Points of Interest': 'Prikaži tačke interesa', 'List Policies & Strategies': 'Prikaži politike i strategije', 'List Population Statistics': 'Izlistaj demografsku statistiku', 'List Positions': 'Navedi Pozicije', 'List Posts': 'Prikaži blokove ugradivog teksta', 'List Problems': 'Lista problema', 'List Profiles configured for this Layer': 'Prikaži profile konfigurisane za ovaj sloj', 'List Programs': 'Prikaži programe', 'List Project Organizations': 'Prikaži organizacije projekta', 'List Projections': 'Prikaži projekcije', 'List Projects': 'Prikaži projekte', 'List Question Meta-Data': 'Prikaži metapodatke pitanja', 'List Rapid Assessments': 'Izlistaj brze procjene', 'List Received Items': 'Izlistaj primljene predmete', 'List Received Shipments': 'Prikaži primljene isporuke', 'List Received/Incoming Shipments': 'Prikaži primljene/dolazne pošiljke', 'List Records': 'Prikaži zapise', 'List Recurring Requests': 'Prikaži ponavljajuće zahtjeve', 'List Regions': 'Prikaži područja', 'List Registrations': 'Prikaži registracije', 'List Relatives': 'Izlistaj rodbinu', 'List Reports': 'Prikaži izvještaje', 'List Repositories': 'Prikaži repozitorije', 'List Request Items': 'Lista traženih predmeta', 'List Request Templates': 'Prikaži predloške zahtjeva', 'List Requested Skills': 'Prikaži tražene vještine', 'List Requests': 'Prikaži 
zahtjeve', 'List Resources': 'Prikaži resurse', 'List Response Summaries': 'Prikaži sumarne odgovore', 'List Responses': 'Prikaz odgovora', 'List Rivers': 'Lista rijeka', 'List Roles': 'Prikaži uloge', 'List Rooms': 'Prikaži sobe', 'List saved searches': 'Prikaži sačuvane pretrage', 'List Scenarios': 'Lista scenarija', 'List School Districts': 'Prikaz rejona škole', 'List School Reports': 'Prikaz izvještaja o školama', 'List Seaports': 'Prikaži luke', 'List Sections': 'Lista sekcija', 'List Sector': 'Prikaz sektora', 'List Sectors': 'Prikaži sektore', 'List Sent Items': 'Prikaži poslane stavke', 'List Sent Shipments': 'Prikaži poslane pošiljke', 'List Series': 'Prikaži serije', 'List Service Profiles': 'Prikaži profile usluga', 'List Services': 'Prikaži usluge', 'List Settings': 'Izlistaj postavke', 'List Shelter Services': 'Prikaži usluge skloništa', 'List Shelter Statuses': 'Prikaži statuse skloništa', 'List Shelter Types': 'Prikaži tipove skloništa', 'List Shelters': 'Prikaži skloništa', 'List Shipment Items': 'Prikaži predmete pošiljke', 'List Shipment/Way Bills': 'Lista Poslanih Pošiljki', 'List Shipment<>Item Relation': 'Prikaži Pošiljka<>Odnos predmeta', 'List Shipments': 'Lista Poslanih Pošiljki', 'List Skill Equivalences': 'Prikaži ekvivalencije vještina', 'List Skill Provisions': 'Izlistaj pružanja vještina', 'List Skill Types': 'Prikaži tipove vještina', 'List Skills': 'Prikaži vještine', 'List Solutions': 'Izlistaj rješenja', 'List Sources': 'Prikaži izvore', 'List Staff': 'Izlistaj osoblje', 'List Staff & Volunteers': 'Prikaži osoblje i vlolontere', 'List Staff Assignments': 'Prikaži dodjele osoblja', 'List Staff Members': 'Prikaži članove osoblja', 'List Staff Types': 'Izlistaj tipove osoblja', 'List Status': 'Ispiši status', 'List Status Reports': 'Prikaži statusne izvještaje', 'List Statuses': 'Prikaži statuse', 'List Stock Adjustments': 'Prikaži prilagođenja zaliha', 'List Stock Counts': 'Prikaži količine zaliha', 'List Stock in Warehouse': 
'Prikaži zalihe u skladištima', 'List Storage Bins': 'Prikaz korpi za smještaj', 'List Storage Location': 'Navedi lokacije skladišta', 'List Subscriptions': 'Ispiši pretplate', 'List Subsectors': 'Prikaži podsektore', 'List Suppliers': 'Prikaži dobavljače', 'List Support Requests': 'Izlistaj zahtjeve za podršku', 'List Survey Answers': 'Navedi odgovore anketa', 'List Survey Questions': 'Prikaz anketnih pitanja', 'List Survey Sections': 'Izlistaj sekcije ankete', 'List Survey Series': 'Prikaži istraživačke nizove', 'List Survey Templates': 'Izlistaj šablone za ankete', 'List Symbologies': 'Prikaži značenje simbola', 'List Symbologies for Layer': 'Prikaži značenje simbola za sloj', 'List Tagged Posts': 'Prikaži označene dijelove teksta', 'List Tags': 'Prikaži oznake', 'List Tasks': 'Prikaži zadatke', 'List Teams': 'Prikaži timove', 'List Template Sections': 'Prikaži odjeljke predložaka', 'List Themes': 'Prikaži teme', 'List Tickets': 'Prikaži kartice', 'List Tours': 'Prikaži ture', 'List Tracks': 'Izlistaj praćenja', 'List Training Events': 'Prikaži događaje obuke', 'List Trainings': 'Prikaži obuke', 'List unidentified': 'Prikaži neidentifikovano', 'List Units': 'Prikaži jedinice', 'List Users': 'Prikaži korisnike', 'List Vehicle Assignments': 'Prikaži dodjele vozila', 'List Vehicle Details': 'Prikaži detalje o vozilu', 'List Vehicle Types': 'Prikaži tipove vozila', 'List Vehicles': 'Prikaži vozila', 'List Volunteer Cluster Positions': 'Prikaži pozicije skupa volontera', 'List Volunteer Cluster Types': 'Prikaži tipove skupa volontera', 'List Volunteer Clusters': 'Prikaži skupove volontera', 'List Volunteer Roles': 'Prikaži uloge volontera', 'List Volunteers': 'Prikaži volontere', 'List Warehouse Items': 'Prikaži stavke skladišta', 'List Warehouses': 'Prikaži skladišta', 'List/Add': 'Izlistaj/Dodaj', 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Popis "ko radi šta i gdje". 
Omogućava agencijama za pomoć da koordinišu svoje aktivnosti.', 'liter': 'litar', 'Lithuania': 'Litvanija', 'Live Help': 'Pomoć uživo', 'Livelihood': 'Izdržavanje', 'Livelihoods': 'Izdržavanja', 'LMS Administration': 'LMS administracija', 'Load': 'Učitaj', 'Load Cleaned Data into Database': 'Unesi očišćene podatke u bazu podataka', 'Load Raw File into Grid': 'Učitaj neobrađenu datoteku u mrežu', 'Loaded By': 'Učitao', 'Loading': 'Učitavam', 'Loading Equipment': 'Učitavanje opreme', 'Loading Locations...': 'Učitavam lokacije...', 'Local Currency': 'Lokalna valuta', 'Local Name': 'Lokalni naziv', 'Local Names': 'Lokalna imena', 'Location': 'Lokacija', 'Location (Site)': 'Lokacija (mjesto)', 'Location 1': 'Lokacija 1', 'Location 2': 'Lokacija 2', 'Location Added': 'Lokacija dodana', 'Location added': 'Lokacija dodana', 'Location added to Organization': 'Lokacija dodana organizaciji', 'Location cannot be converted into a group.': 'Lokacija ne može biti pretvorena u grupu.', 'Location deleted': 'Lokacija obrisana', 'Location Deleted': 'Izbrisana lokacija', 'Location Detail': 'Detalji lokacije', 'Location Details': 'Detalji lokacije', 'Location Group': 'Grupa lokacija ', 'Location group cannot be a parent.': 'Lokacijska grupa ne može biti roditelj.', 'Location group cannot have a parent.': 'Grupa lokacija ne može imati roditelja', 'Location groups can be used in the Regions menu.': 'Grupe lokacija se mogu koristiti u meniju regija.', 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Lokacije grupa mogu biti korištene za filtriranje prikaza na mapi i u pretrazi rezultata samo na entitetima pokrivenim lokacijama grupe.', 'Location Hierarchies': 'Hijerarhije lokacija', 'Location Hierarchy': 'Hijerarhija lokacija', 'Location Hierarchy added': 'Hijerarhija lokacija dodana', 'Location Hierarchy deleted': 'Hijerarhija lokacija obrisana', 'Location Hierarchy Level 0 Name': 'Ime 
hijerarhijske lokacije nultog nivoa', 'Location Hierarchy Level 1 Name': 'Naziv nivoa 1 u hijerarhiji lokacija', 'Location Hierarchy Level 2 Name': 'Ime hijerarhijske lokacije drugog stepena', 'Location Hierarchy Level 3 Name': 'Ime lokacije 3. hijerarhijskog nivoa', 'Location Hierarchy Level 4 Name': 'Ime nivoa 4 u hijerahiji položaja', 'Location Hierarchy Level 5 Name': 'Naziv lokacije hijerarhijskog nivoa 5', 'Location Hierarchy updated': 'Hijerarhija lokacija ažurirana', 'Location is of incorrect level!': 'Lokacija je na neispravnom nivou', 'Location is Required!': 'Zahtijeva se lokacija', 'Location needs to have WKT!': 'Lokacija treba imati WKT!', 'Location removed from Organization': 'Lokacija uklonjena iz organizacije', 'Location Required!': 'Zahtijeva se lokacija!', 'Location updated': 'Lokacija ažurirana', 'Location:': 'Lokacija:', 'Location: ': 'Lokacija: ', 'Locations': 'Lokacije', 'Locations De-duplicator': 'Deduplikator lokacija', 'Locations of this level need to have a parent of level': 'Lokacije ovog nivoa moraju imati roditelja nivoa', 'Locations should be different!': 'Lokacije trebaju biti različite', 'Lockdown': 'Zaključavanje', 'Loctaion of tip': 'Lokacija savjeta', 'Log': 'Zapisnik', 'Log Entry': 'Element zapisnika', 'Log entry added': 'Unos je dodan', 'Log entry deleted': 'Polje za unos izbrisano', 'Log Entry Deleted': 'Stavka zapisnika izbrisana', 'Log Entry Details': 'Detalji stavki zapisnika', 'Log entry updated': 'Unos je ažuriran', 'Log Time Spent': 'Provedeno vrijeme prijave', 'Logged By': 'Evidentirao', 'Logged Time': 'Vrijeme prijave', 'Logged Time Details': 'Detalji vremena prijave', 'Login': 'Prijava', 'login': 'prijava', 'Login using Facebook account': 'Prijava koristeći Facebook nalog', 'Login using Google account': 'Prijava koristeći Google nalog', 'Login with Facebook': 'Prijava na Facebook', 'Login with Google': 'Prijava preko Google', 'Logistics': 'Logistika', 'Logistics Management': 'Upravljane Logistikom', 'Logistics 
Management System': 'Sistem logističke uprave', 'Logo file %s missing!': 'Nedostaje %s logo datoteka', 'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'Logotip organizacije. To treba biti png ili jpeg datoteka i ne treba biti veći od 400x400', 'Logout': 'Odjavi se', 'long': 'dugi', 'Long Name': 'Dugo ime', 'Long Text': 'Dug Tekst', 'Long-term care': 'Dugoročna briga', 'long>12cm': 'dugo>12cm', 'Longitude': 'Geografska dužina', 'Longitude is Invalid!': 'Geografska dužina je neispravna', 'Longitude is West - East (sideways).': 'Geografska dužina je Zapad - Istok (horizontalno)', 'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': 'Geografska dužina je Zapad - istog (postrance). Geografska širina je nula na ekvatoru i pozitivna je je na sjevernoj polulopti a', 'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je zapad - istok. Geografska dužina je nula na glavnom meridijanu (Vrijeme po Griniču) i pozitivna je prema istoku, preko Evrope i Azije. Geografska dužina je negativna na zapadu, preko Atlantika i Amerika.', 'Longitude is West-East (sideways).': 'Geografska dužina: Zapad-istok (horizontalno)', 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. 
Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je nula na glavnom meridijanu (Vrijeme po Griniču) i pozitivna je prema istoku, preko Evrope i Azije. Geografska dužina je negativna na zapadu, preko Atlantika i Amerika.', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je jednaka nuli na prvom meridijanu (kroz Grinvič, Velika Britanija) i pozitivna je prema istoku, preko Evrope i Azije. Geografska dužina je negativna prema zapadu, preko Atlantika i Amerike.', 'Longitude must be between -180 and 180.': 'Geografska dužina mora biti broj između -180 i 180', 'Longitude of far eastern end of the region of interest.': 'Geografska dužina istočnog dijela posmatrane regije', 'Longitude of far western end of the region of interest.': 'Geografska dužina krajnje desne/zapadne tačke regiona o kom je riječ', 'Longitude of Map Center': 'Geografska dužina centra mape', 'Longitude should be between': 'Geografska dužina treba biti između', 'Looting': 'Pljačkanje', 'Lost': 'Izgubljeno', 'Lost Password': 'Izgubljena lozinka', 'low': 'nisko', 'Low': 'Nisko', 'Low Tide Depth': 'Dubina niske plime', 'Luxembourg': 'Luksemburg', 'Macedonia': 'Makedonija', 'Machine with which data was exchanged.': 'Mašina s kojom su podaci razmijenjeni.', 'Madagascar': 'Madagaskar', 'Magnetic Storm': 'Magnetna Oluja', 'Mailing list': 'Dopisna lista', 'Mailing list added': 'Lista elektronske pošte dodana', 'Mailing list deleted': 'Lista elektronske pošte obrisana', 'Mailing List Details': 'Detalji liste za slanje poruka', 'Mailing List Name': 'Ime liste za slanje poruka', 'Mailing list updated': 'Lista elektronske pošte ažurirana', 'Mailing Lists': 'Dopisne liste', 'Main cash source': 'Glavni izvor gotovine', 'Main income sources before disaster': 'Glavni izvori prihoda prije nepogode', 
'Main?': 'Glavni?', 'Mainstreaming DRR': 'Opšte prihvatanje smanjenja rizika katastrofe', 'Major': 'Bitan', 'Major Damage': 'Značajna šteta', 'Major expenses': 'Glavni troškovi', 'Major outward damage': 'Velika vanjska šteta', 'Make a request': 'Kreiraj zahtjev', 'Make a Request for Aid': 'Kreiraj zahtjev za pomoć', 'Make Commitment': 'Kreiraj zaduženje', 'Make New Commitment': 'Kreiraj novo zaduženje', 'Make People Request': 'Napravi zahtjev za ljudima', 'Make Pledge': 'Obećati podršku', 'Make preparations per the <instruction>': 'Kreirajte pripreme po oznaci <instruction>', 'Make Request': 'Pošalji Zahtjev', 'Make Supplies Request': 'Napravi zahtjev za zalihama', 'Malawi': 'Malavi', 'Malaysia': 'Malezija', 'Maldives': 'Maldivi', 'male': 'Muško', 'Male': 'Muškarac', 'Malnutrition present prior to disaster': 'Neuhranjenost prisutna prije katastrofe', 'Manage': 'Upravljajte', 'Manage Cache': 'Upravljanje kešom', 'Manage Events': 'Upravljaj Događajima', 'Manage Images': 'Upravljaj slikama', 'Manage Incidents': 'Upravljanje incidentima', 'Manage Item catalog': 'Upravljaj katalogom stavki', 'Manage Kits': 'Upravljanje kompletima', 'Manage Layers in Catalog': 'Upravljanje slojevima u katalogu', 'Manage Relief Item Catalogue': 'Upravljanje katalogom humanitarne robe', 'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Upravljajte zahtjevima za zalihe, sredstvima, osobljem ili zahtjevima za druge resurse. 
Poklapanja sa inventarima zaliha se zahtijevaju.', 'Manage requests of hospitals for assistance.': 'Upravljanje zahtjevima bolnica za pomoć.', 'Manage Returns': 'Upravljanje povratima', 'Manage Sub-Category': 'Upraljaj potkategorijama', 'Manage Users & Roles': 'Upravljanje korisnicima i ulogama', 'Manage Vehicles': 'Upravljaj vozilima', 'Manage volunteers by capturing their skills, availability and allocation': 'Upravljaj volonterima vodeći računa o njihovim vještinama, dostupnosti i raspodjeli', 'Manage Warehouses/Sites': 'Upravljanje skladištima/položajima', 'Manage Your Facilities': 'Upravljanje vašim objektima', 'Manager': 'Menadžer', 'Managing Office': 'Ured upravljanja', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Obavezno. U GeoServer, ovo je ime sloja. Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom nakon dvotačke(:).', 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'Obavezno. Bazni URL za pristup servisu, npr. http://host.domain/geoserver/wfs?', 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'Obavezno. Bazni URL za pristup servisu, npr. http://host.domain/geoserver/wms?', 'Mandatory. The URL to access the service.': 'Obavezno. 
URL za pristup usluzi.', 'manicured': 'manikiran', 'manual': 'ručno', 'Manual': 'Priručnik', 'Manual Synchronization': 'Ručna sinhronizacija', 'Manual synchronization completed.': 'Ručna sinhronizacija završena', 'Manual synchronization scheduled - refresh page to update status.': 'Raspoređena ručna sinhronizacija - osvježite stranicu da ažurirate status', 'Manual synchronization started in the background.': 'Ručna sinhronizacija započeta u pozadini.', 'Many': 'Mnogo', 'Map': 'Karta', 'Map cannot display without prepop data!': 'Mapa se ne može prikazati bez pripremljenih podataka', 'Map Center Latitude': 'Geografska širina središta mape', 'Map Center Longitude': 'Geografska dužina centra mape', 'Map Profile': 'Konfiguracija karte', 'Map Profile added': 'Dodana konfiguracija mape', 'Map Profile deleted': 'Obrisana konfiguracija mape', 'Map Profile Details': 'Detalji o konfiguraciji mape', 'Map Profile removed': 'Konfiguracija Karte izbrisana', 'Map Profile updated': 'Ažurirana konfiguracija mape', 'Map Profiles': 'Konfiguracija Karte', 'Map has been copied and set as Default': 'Mapa je kopirana i postavljena kao podrazumijevana', 'Map has been set as Default': 'Mapa je postavljena kao podrazumijevana', 'Map Height': 'Visina karte', 'Map is already your Default': 'Mapa je već podrazumijevana', 'Map not available: Cannot write projection file - %s': 'Mapa nije dostupna: Ne mogu pisati datoteku projekcije - %s', 'Map not available: No Projection configured': 'Mapa nije dostupna: nema konfigurisane projekcije', 'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'Mapa nije dostupna: Projekcija %(projection)s nije podržana - molim dodajte definiciju u %(path)s', 'Map of Base Stations': 'Mapa baznih stanica', 'Map of Communities': 'Mapa zajednica', 'Map of Facilities': 'Mapa Objekata', 'Map of Hospitals': 'Karta bolnica', 'Map of Incident Reports': 'Mapa izvještaja o incitentu', 'Map of Offices': 'Mapa kancelarija', 'Map of 
Projects': 'Mapa projekata', 'Map of Requests': 'Mapa zahtjeva', 'Map of Resources': 'Mapa resursa', 'Map of Vehicles': 'Mapa vozila', 'Map of Warehouses': 'Mapa skladišta', 'Map Service Catalogue': 'Katalog usluga mape', 'Map Settings': 'Postavke karte', 'Map Viewing Client': 'Klijent za pregled mapa', 'Map Width': 'Širina mape', 'Map Zoom': 'Uvećanje mape', 'Mapa': 'Mapa', 'MapMaker Hybrid Layer': 'MapMaker hibridni sloj', 'MapMaker Layer': 'Sloj MapMaker', 'Mapping': 'Mapiranje', 'Maps': 'Mape', 'Marine Security': 'Pomorska sigurnost', 'Marital Status': 'Bračno stanje', 'Mark as duplicate': 'Označite kao duplo', 'Mark Sender': 'Označi pošiljaoca', 'Marker added': 'Marker dodan', 'Marker deleted': 'Marker obrisan', 'Marker Details': 'Detalji markera', 'Marker Levels': 'Nivoi markera', 'Marker updated': 'Marker ažuriran', 'Markers': 'Markeri', 'married': 'vjenčan', 'Marshall Islands': 'Maršal ostrva', 'Master': 'Glavni', 'Master Message Log': 'Master zapisnik poruka', 'Master Message Log to process incoming reports & requests': 'Glavni zapisnika poruka za obradu ulaznih izvještaja i zahtjeva', 'Match Percentage': 'Postotak poklapanja', 'Match percentage indicates the % match between these two records': 'Odgovarajući postotak ukazuje na % podudaranja između ova dva zapisa', 'Match Requests': 'Uskladi zahtjeve', 'Match?': 'Slaganje?', 'Matching Catalog Items': 'Odgovarajuće Stavke Kataloga', 'Matching Items': 'Uparene stavke', 'Matching Records': 'Odgovarajući zapisi', 'Matching Vehicle Types': 'Usklađeni tipovi vozila', 'Matrix of Choices (Multiple Answers)': 'Matrica izbora (više odgovora)', 'Matrix of Choices (Only one answer)': 'Matrica izbora (samo jedan odgovor)', 'Matrix of Text Fields': 'Matrica tekstualnih polja', 'Mauritania': 'Mauritanija', 'Mauritius': 'Mauricijus', 'Max Height': 'Maksimalna visina', 'Max Persons per Dwelling': 'Maksimalni broj osoba po jedinici smještaja', 'maxExtent': 'maksimalni obim', 'Maximum': 'Maksimum', 'Maximum Extent': 
'Maksimalna širina', 'Maximum Location Latitude': 'Maksimalna geografska širina lokacije', 'Maximum Location Longitude': 'Maksimalna geografska dužina lokacije', 'maxResolution': 'maksimalnaRezolucija', 'Measure Area: Click the points around the polygon & end with a double-click': 'Područje mjerenja: Kliknite na tačke oko poligona i završite s dvostrukim klikom', 'Measure Length: Click the points along the path & end with a double-click': 'Dužina mjerenja: Kliknite na tačke oko staze i završite s dvostrukim klikom', 'Measures': 'Mjere', 'Media Manager': 'Menadžer medija', 'Medical and public health': 'Medicina i javno zdravstvo', 'Medical Conditions': 'Medicinski uslovi', 'Medicine': 'Medicina', 'Medium': 'Srednje', 'medium': 'srednji', 'medium<12cm': 'srednje<12cm', 'Megabytes per Month': 'Megabajta po mjesecu', 'Member Organizations': 'Organizacije članice', 'Members': 'Članovi', 'Membership': 'Članstvo', 'Membership added': 'Dodano članstvo', 'Membership deleted': 'Članstvo izbrisano', 'Membership Details': 'Detalji o članstvu', 'Membership updated': 'Ažurirano članstvo', 'Memberships': 'Članstva', 'Mensajería': 'Slanje poruka', 'Mental': 'Mentalno', 'Menu': 'Meni', 'menu item': 'stavka menija', 'Merge': 'Spoji', 'Merge records': 'Spoji zapise', 'Message': 'Poruka', 'message': 'poruka', 'Message added': 'Dodana poruka', 'Message deleted': 'Poruka obrisana', 'Message Details': 'Detalji poruke', 'Message Log': 'Zapisnik poruka', 'Message Source': 'Izvor poruke', 'Message updated': 'Poruka ažurirana', 'Message variable': 'Varijabla poruke', 'Message Variable': 'Promjenjiva poruke', 'Messages': 'Poruke', 'Messaging': 'Slanje poruka', 'Messaging Module': 'Modul poruka', 'Messaging settings updated': 'Ažurirana podešenja razmjene poruka', 'Metadata': 'Meta podaci', 'Meteorite': 'Meteor', 'Meteorological (inc. flood)': 'Meteorološki (uklj. 
poplave)', 'meter': 'metar', 'meter cubed': 'kubni metar', 'meters': 'metara', 'Method used': 'Metode korištene', 'Mexico': 'Meksiko', 'MGRS Layer': 'MGRS sloj', 'Micronutrient malnutrition prior to disaster': 'Neuhranjenost mikroelementima prisutna prije katastrofe', 'middle': 'sredina', 'Middle Name': 'Srednje ime', 'Migrants or ethnic minorities': 'Imigranti ili etničke manjine', 'Mileage': 'Kilometraža', 'Milestone': 'Prekretnica', 'Milestone Added': 'Prekretnica dodana', 'Milestone Deleted': 'Prekrednica izbrisana', 'Milestone Details': 'Detalji prekretnice', 'Milestone Updated': 'Prekretnica ažurirana', 'Milestones': 'Prekretnice', 'Military': 'Vojni', 'Minimum': 'Minimum', 'Minimum Bounding Box': 'Minimalna uokviravajuća kutija', 'Minimum Location Latitude': 'Minimalna geografska širina lokacije', 'Minimum Location Longitude': 'Minimalna geografska dužina lokacije', 'Minimum shift time is 6 hours': 'Minimalno vrijeme do smjene je 6 sati', 'Minor Damage': 'Manja šteta', 'Minor/None': 'Minorno/ništa', 'Minorities participating in coping activities': 'Manjine koje učestvuju u akcijama suočavanja', 'Minute': 'minuta', 'Minutes must be a number between 0 and 60': 'Broj minuta mora biti između 0 i 60', 'Minutes must be a number.': 'Minuta mora biti broj', 'Minutes must be less than 60.': 'Minute bi trebale biti broj manji od 60', 'Minutes per Month': 'Minute po mjesecu', 'Minutes should be a number greater than 0 and less than 60': 'Minute bi trebale biti broj veći od nula i manji od 60', 'Minutes should be greater than 0 and less than 60': 'Minute bi trebale biti broj veći od nula i manji od 60', 'Miscellaneous': 'Razno', 'misshapen': 'deformisano', 'missing': 'nedostaje', 'Missing': 'Nestalo', 'Missing Person': 'Nestala osoba', 'Missing Person Details': 'Detalji o nestaloj osobi', 'Missing Person Registry': 'Registar nestalih osoba', 'Missing Person Reports': 'Izvještaji o nestalim osobama', 'Missing Persons': 'Nestale osobe', 'Missing Persons Registry': 
'Registar nestalih osoba', 'Missing Persons Report': 'Izvještaj o nestalim osobama', 'Missing Report': 'Nedostajući izvještaj', 'Missing Senior Citizen': 'Izgubljen stariji građanin', 'Missing Vulnerable Person': 'Ranjiva osoba nestala', 'Mission': 'Misija', 'Mission added': 'Dodana misija', 'Mission deleted': 'Misija izbrisana', 'Mission Details': 'Detalji zadatka', 'Mission Record': 'Zapis misije', 'Mission updated': 'Misija ažurirana', 'Missions': 'Misije', 'mixed': 'izmiješano', 'Mobile': 'Pokretno', 'Mobile Assess.': 'Mobilna procjena.', 'Mobile Basic Assessment': 'Mobilna osnovna procjena', 'Mobile Commons (Inbound)': 'Mobile Commons (Ulazna)', 'Mobile Commons Setting added': 'Uobičajene mobilne postavke dodane', 'Mobile Commons Setting deleted': 'Uobičajene mobilne postavke obrisane', 'Mobile Commons Setting Details': 'Detalji za postavke Mobile Commons', 'Mobile Commons Settings': 'Postavke za Mobile Commons', 'Mobile Commons settings updated': 'Uobičajene mobilne postavke ažurirane', 'Mobile Commons SMS Settings': 'Uobičajene SMS postavke', 'Mobile Phone': 'Mobilni Telefon', 'Mobile Phone #': 'Broj mobitela', 'Mobile Phone Number': 'Broj mobilnog telefona', 'Mode': 'Način rada', 'Model/Type': 'Model/Tip', 'Modem Settings': 'Postavke modema', 'Modem settings updated': 'Uobičajene mobilne postavke ažurirane', 'Moderate': 'Umjereno', 'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'Izmjena značajke: Odaberite karakteristiku koju želite deformisati i prevucite tačke da deformišete karakteristiku na izabran način', 'Modify Information on groups and individuals': 'Modifikuj informacije o grupama i pojedincima', 'Modifying data in spreadsheet before importing it to the database': 'Modificiranje podataka u tabeli prije njihovog importovanja u bazu.', 'Module': 'Modul', 'Module Administration': 'Administracija modula', 'module allows the site administrator to configure various 
options.': 'modul omogućava administratoru stranice da prilagodi razne opcije.', 'Module disabled!': 'Modul je isključen.', 'module helps monitoring the status of hospitals.': 'Modul pomaže nadgledanju statusa bolnica', 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': "modul pruža mehanizam da se zajednički omogući pregled katastrofe u toku, koristeći 'online' mapiranje (Geografski Informacijski Sistem (GIS) )", 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS). You can add markers and pictures to pin point incidents on the map.': "modul pruža mehanizam da se zajednički omogući pregled katastrofe u toku, koristeći 'online' mapiranje (Geografski Informacijski Sistem (GIS) ). Možete dodati markere i slike da pokažete incidente na mapi.", 'Module provides access to information on current Flood Levels.': 'Modul omogućava pristup informacijama o trenutnim nivoima poplave.', 'Module-wise Percentage of Translated Strings': 'Procenat prevedenosti stringova po modulu', 'Moldova': 'Moldavija', 'Monaco': 'Monako', 'Monday': 'Ponedjeljak', 'Monetization': 'Novčana vrijednost', 'Monetization Details': 'Detalji novčane vrijednosti', 'Monetization Report': 'Izvještaj vrijednosti', 'Mongolia': 'Mongolija', 'mongoloid': 'mongoloid', 'Montenegro': 'Crna Gora', 'Month': 'Mjesec', 'Monthly': 'Mjesečno', 'Monthly Cost': 'Mjesečni troškovi', 'Monthly Salary': 'Mjesečna primanja', 'Months': 'Mjeseci', 'more': 'više', 'More Info': 'Više Informacija', 'More Options': 'Više opcija', 'more...': 'više...', 'Morgue': 'Mrtvačnica', 'Morgue added': 'Mrtvačnica dodana', 'Morgue deleted': 'Mrtvačnica obrisana', 'Morgue Details': 'Detalji o mrtvačnici', 'Morgue Status': 'Status Mrtvačnice', 'Morgue Units Available': 'Mrtvačnice na raspolaganju', 'Morgue updated': 'Mrtvačnica ažurirana', 'Morgues': 'Mrtvačnice', 'Morocco': 'Maroko', 'Mosque': 
'Džamija', 'Motorcycle': 'Motocikl', 'Moustache': 'Brkovi', 'Mouth, Size': 'Usta, veličina', 'Move Feature: Drag feature to desired location': 'Premještanje karakteristike: Prevucite karakteristiku na željenu lokaciju.', 'Movements (Filter In/Out/Lost)': 'Kretanja (Filter U/Van/Izgubljeno)', 'Mozambique': 'Mozambik', 'Multi-Option': 'Više opcija', 'Multiple': 'Višestruko', 'Multiple Choice (Multiple Answers)': 'Višestruki izbor (više odgovora)', 'Multiple Choice (Only One Answer)': 'Višestruki izbor (samo jedan odgovor)', 'Multiple Matches': 'Višestruko poklapanje', 'Multiple Text Fields': 'Višestruka tekstualna polja', 'Multiplicator': 'Multiplikator', 'MultiPolygon': 'VišePoligonski', 'Muslim': 'Musliman', 'Must a location have a parent location?': 'Mora li lokacija imati lokaciju roditeljsku lokaciju?', 'My Bookmarks': 'Moje zabilješke', 'My Current function': 'Moja Trenutna funkcija', 'My Details': 'Moji detalji', 'My Logged Hours': 'Moji evidentirani sati', 'My Maps': 'Moje mape', 'My Open Tasks': 'Moji otvoreni zadaci', 'My Profile': 'IMoj profil', 'My Tasks': 'Moji zadaci', 'My Volunteering': 'Moje volontiranje', 'Myanmar': 'Mjanmar', 'Módulo de Tickets': 'Modul s karticama', 'n/a': 'nije dostupno', 'N/A': 'N/D', 'Nagorno-Karabakh': 'Nagorno-Karabah', 'Name': 'Naziv', 'Name and/or ID': 'Ime i/ili broj LK', 'Name and/or ID Label': 'Ime i/ili ID oznaka', 'Name field is required!': 'Polje s imenom je obavezno', 'Name for your Twilio Account.': 'Ime za vaš Twilio nalog.', 'Name of a programme or another project which this project is implemented as part of': 'Ime programa ili drugog projekta čiji je ovaj projekt dio', 'Name of Award': 'Ime nagrade', 'Name of Driver': 'Ime vozača', 'Name of Father': 'Ime oca', 'Name of Institute': 'Ime institucije', 'Name of Map': 'Ime mape', 'Name of Mother': 'Ime majke', 'Name of Storage Bin Type.': 'Ime korpe za smještaj', 'Name of the file (& optional sub-path) located in static which should be used for the background of the 
header.': 'Naziv datoteke (i opcionalno putanja) koja će biti korištena kao pozadina zaglavlja.', 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Ime datoteke (i opcionalna podstaza ) lociranog u statičnom mjestu , koji bi trebao biti korišten za lijevu gornju sliku.', 'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Ime datoteke (i eventualna podstaza) smještena u pogledima koja se može koristiti za zaglavlje na dnu stranice', 'Name of the person in local language and script (optional).': 'Ime osobe na maternjem jeziku', 'Name of the repository (for you own reference)': 'Ime repozitorija (za vašu vlastitu referencu)', 'Name, Org and/or ID': 'Ime, Org i/ili ID', 'Name/Model/Type': 'Ime/Model/Tip', 'Names can be added in multiple languages': 'Imena mogu biti dodana na više jezika', 'Namibia': 'Namibija', 'narrow': 'usko', 'National': 'Nacionalno', 'National ID Card': 'Lična karata', 'National NGO': 'Nacionalna NVO', 'National Staff': 'Nacionalno osoblje', 'Nationality': 'Nacionalnost', 'Nationality of the person.': 'Nacionalnost ili državljanstvo osobe.', 'natural hazard': 'prirodni rizik', 'Nautical Accident': 'Pomorska nesreća', 'Nautical Hijacking': 'Nautičke otmice', 'NDRT (National disaster response teams)': 'NDRT (Nacionalni tim za odgovor u slučaju katastrofa)', 'Neck, Length': 'Vrat, dužina', 'Neck, Peculiarities': 'Vrat, specifičnosti', 'Neck, Shape': 'Vrat, oblik', "Need a 'url' argument!": "Potreban 'url' argument", 'Need added': 'Potreba dodana', 'Need deleted': 'Potreba obrisana', 'Need to be logged-in to be able to submit assessments': 'Potrebno je da budete prijavljeni da biste podnijeli procjenu', 'Need to configure Twitter Authentication': 'Potrebno je izvršiti konfiguraciju Twitter autentičnosti', 'Need to specify a budget!': 'Potrebno je navesti budžet!', 'Need to specify a Budget!': 'Potrebno je navesti budžet!', 'Need to specify a bundle!': 
'Neophodno je naznačiti paket!', 'Need to specify a feature group!': 'Morate navesti grupu karakteristika', 'Need to specify a group!': 'Morate specifikovati grupu!', 'Need to specify a kit!': 'Potrebno je odrediti komplet!', 'Need to specify a Kit!': 'Potrebno je odrediti komplet!', 'Need to specify a location to search for.': 'Potrebno specificirati traženu lokaciju.', 'Need to specify a Resource!': 'Trebate navesti resurs!', 'Need to specify a role!': 'Mora se specificirati uloga!', 'Need to specify a table!': 'Potrebno je navesti tabelu!', 'Need to specify a user!': 'Potrebno je odrediti korisnika!', 'Need Type': 'Vrsta potreba', 'Need Type added': 'Dodan tip potrebe', 'Need Type deleted': 'Tip potreba izbrisan', 'Need Type Details': 'Potrebni detalji o tipu', 'Need Type updated': 'Vrsta potrebe ažurirana', 'Need Types': 'Tipovi potreba', 'Need updated': 'Potreba ažurirana', 'Needs': 'Potrebe', 'Needs Details': 'Detalji potreba', 'Needs elaboration!!!': 'Treba elaborirati!!!', 'Needs Maintenance': 'Potrebno održavanje', 'Needs to reduce vulnerability to violence': 'Potrebno je smanjiti ranjivost prema nasilju', 'Negative Flow Isolation': 'Negativna izolacija protoka', 'negroid': 'negroid', 'Neighborhood': 'Komšiluk', 'Neighbourhood': 'Susjedstvo', 'Neighbouring building hazard': 'Opasnosti od susjednih zgrada', 'Neonatal ICU': 'Intenzivna njega za novorođenčad', 'Neonatology': 'Neonatologija', 'Netherlands': 'Nizozemska', 'Network': 'Mreža', 'Network added': 'Mreža dodana', 'Network Details': 'Detalji mreže', 'Network removed': 'Mreža uklonjena', 'Network updated': 'Mreža ažurirana', 'Networks': 'Mreže', 'Neurology': 'Neurologija', 'Never': 'Nikada', 'never': 'nikad', 'never update': 'nikad ažurirati', 'new': 'Novo', 'New': 'Novi', 'new ACL': 'novi ACL', 'New Activity Type': 'Novi tip aktivnosti', 'New Annual Budget created': 'Kreiran novi godišnji budžet', 'New Assessment reported from': 'Izvještaj o novoj procjeni iz', 'New Body Find': 'Novo traženje tijela', 
'New cases in the past 24h': 'Novi slučajevi u posljednjih 24 sata', 'New Certificate': 'Novi certifikat', 'New Checklist': 'Novi spisak', 'New Entry': 'Novi unos', 'New Entry in Asset Log': 'Nova stavka u zapisniku sredstava', 'New Event': 'Novi događaj', 'New Hazard': 'Novi rizik', 'New Home': 'Novi dom', 'New Item Category': 'Nova kategorija predmeta', 'New Job Role': 'Novo radno mjesto', 'New Location': 'Nova lokacija', 'New Location Group': 'Nova grupa lokacija ', 'New Organization': 'Nova organizacija', 'Add Output': 'Novi izlaz', 'New Page': 'Nova strana', 'New Patient': 'Novi pacijent', 'New Peer': 'Novi saradnik', 'New Post': 'Novi ubacivi tekst', 'New Problem': 'Novi problem', 'New Record': 'Novi zapis', 'new record inserted': 'novi zapis unesen', 'New Records': 'Novi zapisi', 'New Relative': 'Novi srodnik', 'New Report': 'Novi izvještaj', 'New Request': 'Novi zahtjev', 'New Role': 'Nova uloga', 'New Scenario': 'Novi scenario', 'New Sector': 'Novi sektor', 'New Service': 'Nova usluga', 'New Skill': 'Nova vještina', 'New Solution Choice': 'Izbor novog rješenja', 'New Staff Member': 'Novi član osoblja', 'New Stock Adjustment': 'Novo prilagođenje zalihe', 'New Stock Count': 'Nova količina zaliha', 'New Support Request': 'Novi zahtjev za podršku', 'New Synchronization Peer': 'Novi sinhronizacijski saradnik', 'New Team': 'Novi tim', 'New Theme': 'Nova tema', 'New Ticket': 'Nova kartica', 'New Training Course': 'Novi kurs obučavanja', 'New updates are available.': 'Nove dostupne nadogradnje.', 'New Volunteer': 'Novi Volonter', 'New Zealand': 'Novi Zeland', 'Newer Timestamp': 'Novija vremenska oznaka', 'News': 'Novosti', 'Next': 'Sljedeće', 'next 100 rows': 'Narednih 100 redova', 'Next run': 'Sljedeće pokretanje', 'Next View': 'Sljedeći prikaz', 'NGO': 'NVO', 'Nicaragua': 'Nikaragva', 'Nigeria': 'Nigerija', 'No': 'Ne', 'NO': 'NE', 'no': 'nema', 'No access at all': 'Nema nikakvog pristupa', 'No access to this record!': 'Nema pristupa ovom zapisu!', 'No Accounts 
currently defined': 'Trenutno nema definisanih računa', 'No action recommended': 'Nema preporučene akcije', 'No Activities currently registered in this event': 'Trenutno nema registrovanih Aktivnosti u ovom događaju', 'No Activities Found': 'Nema pronađenih aktivnosti', 'No Activity Organizations Found': 'Nema nađenih organizacija aktivnosti', 'No Activity Types Found': 'Nema nađenih tipova aktivnosti', 'No Activity Types found for this Activity': 'Nema nađenih tipova aktivnosti za ovu aktivnost', 'No Activity Types found for this Project Location': 'Nema nađenih tipova aktivnosti za ovu lokaciju projekta', 'No Addresses currently registered': 'Trenutno nema registrovanih adresa', 'No Affiliations defined': 'Nema definisanih preduzeća', 'No Aid Requests currently registered': 'Trenutno nema registrovanih zahtjeva za pomoć', 'No Airports currently registered': 'Trenutno nema registrovanih aerodroma', 'No Alternative Items currently registered': 'Nema alternativnih artikala registrovanih', 'No annual budgets found': 'Godišnji budžeti nisu nađeni', 'No Appraisals found': 'Nema nađenih poređenja opcija', 'No Assessment Answers': 'Nema odgovore ocjene', 'No Assessment Questions': 'Nema pitanja ocjene', 'No Assessment Summaries currently registered': 'Nema trenutno registrovanih procjena pregleda', 'No Assessment Templates': 'Nema predložaka ocjene', 'No Assessments currently registered': 'Trenutno nema registrovanih procjena', 'No Asset Assignments currently registered': 'Trenutno nema registrovanih sredstava', 'No Assets currently registered': 'Nema sredstva koja je trenutno registrovano', 'No Assets currently registered in this event': 'Trenutno nema registrovanih sredstava na ovom događaju', 'No Assets currently registered in this incident': 'Trenutno nema sredstava registrovanih u ovom incidentu', 'No Assets currently registered in this scenario': 'Trenutno nema sredstava registrovanih u ovom scenariju', 'No Awards found': 'Nema nađenih nagrada', 'No Base Layer': 
'Nema baznog sloja', 'No Base Stations currently registered': 'Nema trenutno registrovanih baznih stanica', 'No Baseline Types currently registered': 'Trenutno nije registriran nijedan tip referentne tačke', 'No Baselines currently registered': 'Nijedna referentnu tačku trenutno registrovana', 'No Beneficiaries Found': 'Nema nađenih korisnika', 'No Beneficiary Types Found': 'Nema nađenih tipova korisnika', 'No Branch Organizations currently registered': 'Nema trenutno registrovanih ogranaka organizacija', 'No Brands currently registered': 'Nema trenutno registrovanih marki', 'No Budgets currently registered': 'Nema prijavljenih budžeta trenutno', 'No Bundles currently registered': 'Nema registrovanih paketa', 'No Camp Services currently registered': 'Trenutno nema registrovanih usluga u kampu', 'No Camp Statuses currently registered': 'Trenutno nema registrovanih statusa kampa', 'No Camp Types currently registered': 'Nije registrovan nikakav tip kampa', 'No Campaign Messages Found': 'Nema nađenih poruka kampanje', 'No Campaigns Found': 'Nema nađenih kampanja', 'No Camps currently registered': 'Nijedan Kamp nije trenutno registrovan', 'No Cases found': 'Nema nađenih slučajeva', 'No Catalog Items currently registered': 'Trenutno nije registrovan katalog sa stavkama', 'No Catalogs currently registered': 'Nema trenutno registrovanih kataloga', 'No Checklist available': 'Nijedna kontrolna lista nije dostupna', 'No Cluster Subsectors currently registered': 'Nijedan podsektor skupa trenutačno registrovan', 'No Clusters currently registered': 'Trenutno nema registrovanih skupova', 'No Coalitions currently recorded': 'Nema trenutnio zabilježenih koalicija', 'No Commitment Items currently registered': 'Trenutno nema registriranih stavki zaduženja', 'No Commitments': 'Nema zaduženja', 'No Communities Found': 'Nema nađenih zajednica', 'No Completed Assessment Forms': 'Nema završenih formulara ocjene', 'No Configs currently defined': 'Trenutno nema definisanih konfiguracija', 
'No conflicts logged': 'Nisu zabilježeni konflikti', 'No contact information available': 'Nisu dostupne informacije o kontaktu', 'No contact method found': 'Nije pronađena metoda kontakta', 'No Contacts currently registered': 'Nema registriranih kontakata', 'No contacts currently registered': 'Nema registriranih kontakata', 'No Contacts Found': 'Nema nađenih kontakta', 'No contacts yet defined for this site': 'Kontakti još nisu definisani za ovo mjesto', 'No Credentials currently set': 'Nisu postavljeni nijedni akreditivi', 'No data available': 'Nema dostupnih podataka', 'No Data currently defined for this Theme Layer': 'Nema definisanih podataka za ovaj tematski sloj', 'No data in this table - cannot create PDF!': 'Nema podataka u ovoj tabeli - ne može se kreirati PDF!', 'No databases in this application': 'Nema baza podataka u ovom zahtjevu', 'No dead body reports available': 'Nijedan izvještaj o mrtvim tijelima nije dostupan', 'No Details currently registered': 'Trenutno nema registrovanih detalja', 'No Disaster Assessments': 'Nema procjena katastrofe', 'No Distribution Items Found': 'Nisu pronađene stavke raspodjele', 'No Distributions currently registered': 'Trenutno nema registrovanih raspodjela', 'No Distributions Found': 'Nema nađenih raspodjela', 'No Documents currently attached to this request': 'Trenutno nema dokumenata koji su priloženi uz ovaj zahtjev', 'No Documents found': 'Nijedan dokument nije pronađen', 'No Donations': 'Nema donacija', 'No Donors currently registered': 'Trenutno nema registrovanih donatora', 'No education details currently registered': 'Nema trenutno registrovanih nivoa obrazovanja', 'No Education Levels currently registered': 'Nema trenutno registrovanih nivoa obrazovanja', 'No Emails currently in InBox': 'Trenutno nema elektronske pošte u ulaznom sandučetu', 'No Emails currently in Outbox': 'Trenutno nema e-mail poruka u izlaznom sandučetu', 'No Emails currently in Sent': 'Nema trenutno elektronske pošte za slanje', 'No entries 
currently available': 'Nema trenutno dostupnih unosa', 'No entries found': 'Ništa nije uneseno', 'No entries matching the query': 'Nema stavki vezanih za upit', 'No entry available': 'Nema dostupnog unosa', 'No Event Types currently registered': 'Nijedan tip događaja nije trenutno registrovan', 'No Events currently registered': 'Nema registriranih događaja', 'No Facilities currently registered': 'Trenutno nema registrovanih objekata', 'No Facilities currently registered in this event': 'Nema trenutno registriranih objekata za ovaj događaj', 'No Facilities currently registered in this incident': 'U ovom incidentu trenutno nema registrovanih objekata', 'No Facilities currently registered in this scenario': 'U ovom scenariju trenutno nema registrovanih postrojenja', 'No Facility Types currently registered': 'Nijedan tip objekta nije trenutno registrovan', 'No Feature Classes currently defined': 'Nijedna klasa karakteristika nije trenutno definisana.', 'No Feature Groups currently defined': 'Nijedna klasa mogućnosti nije trenutno definisana.', 'No Feature Layers currently defined': 'Nema trenutno definisanih slojeva karakteristika', 'No file uploaded.': 'Nema poslane datoteke,', 'No Flood Reports currently registered': 'trenutno nema registriranih izvještaja o poplavi', 'No forms to the corresponding resource have been downloaded yet.': 'Još uvijek nisu preuzeti obrasci za odgovarajuće resurse', 'No further users can be assigned.': 'Daljnji korisnici se ne mogu dodijeliti', 'No GPS data currently registered': 'Nema GPS podataka trenutno', 'No Group Memberships currently registered': 'Trenutno nema registrovanih članstava u grupi', 'No Groups currently defined': 'Trenutno nema definisanih grupa', 'No Groups currently registered': 'Trenutno nema registrovanih grupa', 'No Hazards currently registered': 'Trenutno nema registrovanih rizika', 'No Hazards found for this Project': 'Nema nađenih rizika za ovaj projekt', 'No Heliports currently registered': 'Trenutno nema 
registrovanih helikopterskih sletišta', 'No Homes currently registered': 'Nijedan dom trenutno registrovan', 'No Hospitals currently registered': 'Nema registriranih bolnica', 'No Human Resources currently assigned to this incident': 'Ljudski resursi nisu dodijeljeni ovom incidentu', 'No Human Resources currently registered in this event': 'Nema ljudskih resursa trenutno registriranih za ovaj događaj', 'No Human Resources currently registered in this scenario': 'Nema ljudskih resursa trenutno registrovanih u ovom scenariu', 'No Identification Report Available': 'Nema dostupnog izvještaja o identifikaciji', 'No Identities currently registered': 'Nema trenutno registriranih identiteta', 'No Image': 'Nema fotografije', 'No Images currently registered': 'Nema trenutno registrovanih slika', 'No Impact Types currently registered': 'Nijedan tip utjecaja nije trenutačno registrovan.', 'No Impacts currently registered': 'Nema trenutno zabilježenih utjecaja', 'No Import Files currently uploaded': 'Trenutno nema dodanih datoteka za uvoz', 'No import jobs': 'Nema poslova uvoza', 'No Incident Reports currently registered': 'Nema trenutno registrovanih izvještaja o incidentima', 'No Incident Reports currently registered for this event': 'Nema trenutno registrovanih izvještaja o incidentu za ovaj događaj', 'No Incident Reports currently registered in this incident': 'Trenutno nema izvještaja o incidentu registrovanih u ovom incidentu', 'No Incident Types currently registered': 'Trenutno nema registrovanih tipova incidenta', 'No Incidents currently registered in this event': 'Trenutno nema registrovanih incidenata na ovom događaju', 'No Incoming Shipments': 'Nema dolazećih pošiljki', 'No Inventories currently have suitable alternative items in stock': 'Nijedan inventar trenutno nema odgovarajuću zamjensku stavku u zalihama', 'No Inventories currently have this item in stock': 'Nijedan inventar trenutno nema ovu stavku u zalihama', 'No Inventory Stores currently registered': 'Broj 
registrovanih stavki u inventaru', 'No Item Catalog Category currently registered': 'Trenutno nema registrovanih predmetnih kategorija', 'No Item Catalog currently registered': 'Trenutno nema kataloga predmeta', 'No Item Categories currently registered': 'Trenutno nema registrovanih predmetnih kategorija', 'No Item currently registered': 'Nema trenutno registrovanih stavki', 'No Item Packets currently registered': 'Nema trenutno registrovanih paketa stavki', 'No Item Packs currently registered': 'Nema trenutno registrovanih paketa', 'No Item Sub-Category currently registered': 'Trenutno nema registrovanih predmetnih kategorija', 'No items currently in stock': 'Nema stavki u zalihama', 'No Items currently registered': 'Nema trenutno registrovanih stavki', 'No Items currently registered in this Inventory': 'Trenutno nema registrovanih stavki u inventaru', 'No Items currently requested': 'Nema trenutno registrovnih stavki', 'No items have been selected for shipping.': 'Nema stavki izabranih za isporuku.', 'No jobs configured': 'Nema podešenih poslova', 'No jobs configured yet': 'Još uvijek nema podešenih poslova', 'No Keys currently defined': 'Nema trenutno definisanih ključeva', 'No Keywords Found': 'Nema nađenih ključnih riječi', 'No Kits': 'Nema kompleta', 'No Kits currently registered': 'Trenutno nema registrovanih kompleta', 'No Layers currently configured in this Profile': 'Nema slojeva konfigurisanih u ovom profilu', 'No Layers currently defined': 'Nema trenutno definisanih slojeva', 'No Layers currently defined in this Symbology': 'Nema definisanih slojeva za ovo značenje simbola', 'No Level 1 Assessments currently registered': 'Nema procjene prvog nivoa koja je trenutno registrovana', 'No Level 2 Assessments currently registered': 'Nivo 2 procjene je trenutno registriran', 'No Location Hierarchies currently defined': 'Trenuitno nije definisana hijerarhija lokacija', 'No location information defined!': 'Nema definisanih informacija o lokaciji', 'No location 
known for this person': 'Ne postoji poznata lokacija za ovu osobu', 'No Locations currently available': 'Nema trenutno dostupnih lokacija', 'No Locations currently registered': 'Nijedna Lokacija trenutno nije registrovana', 'No Locations Found': 'Nema nađenih lokacija', 'No locations found for members of this team': 'Nisu pronađene lokacije za članove ovog tima', 'No Locations found for this Organization': 'Nisu pronađene lokacije za ovu organizaciju', 'No locations registered at this level': 'Nema registrovanih lokacija na ovom nivou', 'No log entries matching the query': 'Nema podudaranja u zapisnika za upit', 'No Mailing List currently established': 'Nema trenutno uspostavljene mailing liste', 'No Map Profiles currently defined': 'Nema trenutno definisane konfiguracije mape', 'No Map Profiles currently registered in this event': 'Trenutno nije registrovana konfiguracije karte u ovom događaju', 'No Map Profiles currently registered in this incident': 'Trenutno nema konfiguracija mapa registrovanih u ovom incidentu', 'No Map Profiles currently registered in this scenario': 'Nijedna konfiguracija karte nije trenutno registrovana u ovom scenariju.', 'No Markers currently available': 'Nema trenutno dostupnih markera', 'No match': 'Nema poklapanja', 'No Match': 'nema podudaranja', 'No Matching Catalog Items': 'Nema odgovarajućih kataloških stavki', 'No Matching Items': 'Nema odgovarajućih stavki', 'No Matching Records': 'Nema niti jedan zapis', 'No matching records found': 'Nisu pronađeni odgovarajući zapisi', 'No matching records found.': 'Nisu pronađeni odgovarajući zapisi', 'No Matching Vehicle Types': 'Nema odgovarajućih tipova vozila', 'No Members currently registered': 'Nema korisnika trenutno registrovanih', 'No Memberships currently defined': 'Trenutno nema definisanih članstava', 'No Memberships currently registered': 'Nema trenutno prijavljenog članstva', 'No Messages currently in InBox': 'Trenutno nema poruka u ulaznom sandučetu', 'No Messages currently in Outbox': 'Trenutno nema poruka 
u izlaznom sandučetu', 'No Messages currently in the Message Log': 'Nema poruka u dnevniku poruka', 'No messages in the system': 'Nema poruka u sistemu', 'No Milestones Found': 'Nema nađenih prekretnica', 'No Mobile Commons Settings currently defined': 'Mobilne postavke trenutno nisu definisane', 'No more items may be added to this request': 'Ne može se više stavki dodati na ovaj zahtjev', 'No morgues found': 'Nema nađenih mrtvačnica', 'No Need Types currently registered': 'trenutno nema registriranih tipova potrebe', 'No Needs currently registered': 'Nema trenutno registrovane potrebe', 'No Networks currently recorded': 'Nema trenutno zabilježenih mreža', 'No of Families Settled in the Schools': 'Broj porodica smještenih u školama', 'No of Families to whom Food Items are Available': 'Broj porodica za koje su dostupni prehrambeni artikli', 'No of Families to whom Hygiene is Available': 'Broj porodica kojima je higijena dostupna', 'No of Families to whom Non-Food Items are Available': 'Broj porodica za koje su dostupni neprehrambeni artikli', 'No of Female Students (Primary To Higher Secondary) in the Total Affectees': 'Broj ženskih učenika (osnovne i srednje škole) od ukupno pogođenih', 'No of Female Teachers & Other Govt Servants in the Total Affectees': 'Broj žena u nastavi i drugim vladinim uslugama u ukupno broju pogođenih', 'No of Male Students (Primary To Higher Secondary) in the Total Affectees': 'Broj muških učenika (osnovne i srednje škole) od ukupno pogođenih', 'No of Male Teachers & Other Govt Servants in the Total Affectees': 'Broj muškaraca u nastavi i drugim vladinim uslugama u ukupno broju pogođenih', 'No of Rooms Occupied By Flood Affectees': 'Broj soba koje su zauzele osobe pogođene poplavom', 'No Office Types currently registered': 'Trenutno nema registrovanih tipova kancelarija', 'No Offices currently registered': 'Nema trenutno registrovanih kancelarija', 'No Offices found!': 'Uredi nisu pronađeni!', 'No Open Tasks for %(project)s': 'Nema 
otvorenih zadataka za %(project)s', 'No options available': 'Nema dostupnih opcija', 'no options available': 'nema dostupnih opcija', 'No options currently available': 'Nema trenutno dostupnih opcija', 'No Orders registered': 'Nema registrovanih narudžbi', 'No Organization Domains currently registered': 'Trenutno nema registrovanih domena organizacija', 'No Organization Types currently registered': 'Trenutno nema registrovanih tipova organizacija', 'No Organizations currently registered': 'Nema trenutno registrovane organizacije', 'No Organizations for Project(s)': 'Nema organizacija za projekt(e)', 'No Organizations found for this Policy/Strategy': 'Nije nađena organizacija za ovu politiku/strategiju', 'No outputs defined': 'Nema nađenih izlaza', 'No Packets for Item': 'Nema paketa za artikle', 'No Packs for Item': 'Nema paketa za artikle', 'No Parsers currently connected': 'Nijedan parser nije trenutno povezan', 'No Partner Organizations currently registered': 'Nema trenutno registrovanih partnerskih organizacija', 'No Patients currently registered': 'Trenutno nema registrovanih pacijenata', 'No peers currently registered': 'Nema trenutno registrovanih saradnika', 'No Peers currently registered': 'Nema trenutno registrovanih suradnika', 'No pending registrations found': 'Na čekanju nema zahtjeva za registraciju', 'No pending registrations matching the query': 'Nema registracije na čekanju za vaš upit', 'No People currently committed': 'Trenutno nema posvecenih Ljudi', 'No People currently registered': 'Trenutno nema registrovanih ljudi', 'No People currently registered in this camp': 'Trenutno nema prijavljenih ljudi u ovom kampu', 'No People currently registered in this shelter': 'Za sad nema registrovanih u skloništu', 'No person record found for current user.': 'Nisu pronađeni lični podaci za trenutnog korisnika.', 'No Persons currently registered': 'Trenutno nema registrovanih osoba', 'No Persons currently reported missing': 'Trenutno nema registrovanih 
nestalih osoba', 'No Persons found': 'Osoba nije pronađena', 'No Photos found': 'Slike nisu nađene', 'No Picture': 'Nema Slike', 'No PoI Types currently available': 'Nema trenutno dostupnih tipova tačaka interesa', 'No Points of Interest currently available': 'Nema trenutno dostupnih tačaka interesa', 'No PoIs available.': 'Nema dostupnih tačaka interesa', 'No Policies or Strategies found': 'Nema nađenih politika ili strategija', 'No Population Statistics currently registered': 'Ne postoji registrovana statistika stanovništva', 'No Posts available': 'Nema dostupnih ubacivih tekstova', 'No posts currently available': 'Nema trenutno dostupnih ubacivih tekstova', 'No posts currently set as module/resource homepages': 'Nema ubacivih tekstova postavljenih kao početne stranice za modul/resurs', 'No posts currently tagged': 'Nema trenutno označenih ubacivih tekstova', 'No Posts currently tagged to this event': 'Ubacivi tekstovi nisu označeni za ovaj događaj', 'No Presence Log Entries currently registered': 'Trenutno nema registrovanih stavki zapisnika prisustva', 'No problem group defined yet': 'Nema još definisane grupe problema', 'No Problems currently defined': 'Nijedan problem trenutno nije definisan', 'No Professional Experience found': 'Nije nađeno profesionalno iskustvo', 'No Profiles currently have Configurations for this Layer': 'Nema profila konfigurisanih za ovaj sloj.', 'No Projections currently defined': 'Trenutno nema definisanih projekcija', 'No Projects currently registered': 'Treunutno nema registrovanih projekata', 'No projects currently registered': 'Treunutno nema registrovanih projekata', 'No Query currently defined': 'Trenutno nema definisanih upita', 'No Question Meta-Data': 'Nema metapodataka pitanja', 'No Rapid Assessments currently registered': 'Nema prijekih procjena trenutno registrovanih', 'No Ratings for Skill Type': 'Nema ocjena za tip vještine', 'No Received Items currently registered': 'Nema registritanih primljenih stavki', 'No Received 
Shipments': 'Nema primljenih pošiljki', 'No Records currently available': 'Trenutno nema nikakvih podataka', 'No records found': 'Nisu pronađeni zapisi', 'No records in this resource. Add one more records manually and then retry.': 'Nema zapisa u ovom resursu. Dodajte ručno jedan ili više zapisa i probajte ponovo.', 'No Records matching the query': 'Nema zapisa koji odgovaraju upitu', 'No records matching the query': 'Nema zapisa koji odgovaraju upitu', 'No records to review': 'Nije zapisa za pregled', 'No recovery reports available': 'Trenutno nema dostupnih izvještaja o pronalasku', 'No Regions currently registered': 'Trenutno nema registrovanih područja', 'No Relatives currently registered': 'Nijedan srodnik nije trenutno prijavljen', 'No report available.': 'Nema dostupnog izvještaja', 'No report specified.': 'Nema navedenog izvještaja.', 'No reports available.': 'Niti jedan izvještaj nije dostupan.', 'No reports currently available': 'Trenutno nema dostupnih izvještaja', 'No repositories configured': 'Nema podešenih repozitorija', 'No Request Items currently registered': 'Nema trenutno registrovanih stavki koje se zahtijevaju', 'No Request Shipments': 'Nema zahtijeva pošiljki', 'No Request Templates': 'Nema predložaka zahtjeva', 'No Requests': 'Nema zahtjeva', 'No requests currently registered': 'Trenutno nema registrovanih zahtjeva', 'No requests found': 'Zahtjevi nisu pronadjeni', 'No Resource Types defined': 'Nema definisanih tipova resursa', 'No Resources assigned to Incident': 'Resursi nisu dodijeljen ovom incidentu', 'No resources configured yet': 'Još uvijek nema podešenih resursa', 'No resources currently registered': 'Trenutno nema registrovanih resursa', 'No resources currently reported': 'Trenutno nema prijavljenih sredstava', 'No Resources in Inventory': 'Nema resursa u zalihama', 'No Response': 'Nema odgovora', 'No Response Summaries Found': 'Nema sumarnih odgovora nađeno', 'No Responses currently registered': 'Trenutno nema registrovanih 
odgovora', 'No Restrictions': 'Bez ograničenja', 'No Rivers currently registered': 'Nema trenutno registrovanih rijeka', 'No role to delete': 'Nema uloge za brisanje', 'No roles currently assigned to this user.': 'Nema uloga dodijeljenih ovom korisniku.', 'No Roles currently defined': 'Uloga nije trenutno definirana', 'No Roles defined': 'Nijedna uloga nije definirana', 'No Rooms currently registered': 'Nema trenutno registriranih soba', 'No Scenarios currently registered': 'Nema trenutno prijavljenih scenarija', 'No School Districts currently registered': 'Trenutno nema registrovanih školskih rejona', 'No School Reports currently registered': 'Trenutno nema registriranih izvještaja o školama', 'No Seaports currently registered': 'Trenutno nema registrovanih luka', 'No Search saved': 'Nema snimljene pretrage', 'No Sections currently registered': 'Nema trenutno registrovanih odjela', 'No Sectors currently registered': 'Sektori trenutno nisu registrovani', 'No Sectors found for this Organization': 'Nisu pronađeni sektori za ovu lokaciju', 'No Sectors found for this Project': 'Nema nađenih sektora za ovaj projekt', 'No Sectors found for this Theme': 'Nema nađenih sektora za ovu temu', 'No Senders Whitelisted': 'Nema pošiljaoca na bijeloj listi', 'No Sent Items currently registered': 'Nema trenutno registrovanih poslanih stvari', 'No Sent Shipments': 'Nema poslanih pošiljki', 'No series currently defined': 'Trenutno nema definisanih serija', 'No service profile available': 'Nema dostupnog profila usluge', 'No Services currently registered': 'Trenutno nema registrovanih usluga', 'No Services found for this Organization': 'Nisu pronađene usluge za ovu organizaciju', 'No Settings currently defined': 'Nema trenutno definisanih postavki', 'No Shelter Services currently registered': 'Trenutno nema registriranih usluga skloništa', 'No Shelter Statuses currently registered': 'Trenutno nema registrovanih statusa skloništa', 'No Shelter Types currently registered': 'Trenutno 
nema registrovanih tipova skloništa', 'No Shelters currently registered': 'Trenutno nema registrovanih skloništa', 'No Shipment Items': 'Nema stavki pošiljke', 'No Shipment Transit Logs currently registered': 'Trenutno nema registrovanih tranzitnih zapisa', 'No Shipment/Way Bills currently registered': 'Trenutno nema registrovanih dostava/putnih naloga', 'No Skill Types currently set': 'Nijedna vrsta vještina nije trenutno podešena', 'No Skills currently requested': 'Nema trenutno traženih vještina', 'No skills currently set': 'Nijedna vještina nije trenutno podešena', 'No Skills Required': 'Nema potrebnih vještina', 'No SMS currently in InBox': 'Trenutno nema SMS u ulaznom sandučetu', 'No SMS currently in Outbox': 'Trenutno nema SMS u izlaznom sandučetu', "No SMS's currently in Sent": 'Broj SMS u poslanom sandučetu', 'No Solutions currently defined': 'Nema definisanih rješenja', 'No Staff currently registered': 'Nema osoblja trenutno registrovanog', 'No staff or volunteers currently registered': 'Trenutno nema registrovanih članova osoblja ili volontera', 'No Staff Types currently registered': 'Nijedan tip osoblja trenutno registrovan', 'No status information available': 'Informacije o statusu nisu dostupne', 'No status information currently available': 'Nema trenutno dostupnih statusnih informacija', 'No Statuses currently registered': 'Trenutno nema registrovanih statusa', 'No stock adjustments have been done': 'Prilagođenja zaliha nisu obavljena', 'No stock counts have been done': 'Količine zaliha nisu obavljene', 'No Stock currently registered': 'Trenutno nema registrovanih zaliha', 'No Stock currently registered in this Warehouse': 'Nema zaliha registrovanih za ovo skladište', 'No Storage Bin Type currently registered': 'Nijedan tip korpe za smještaj trenutno registrovan', 'No Storage Bins currently registered': 'Trenutno nema registrovanih korpi za smještaj', 'No Storage Locations currently registered': 'Trenutno nema registrovanih lokacija za smještaj', 
'No Subscription available': 'Nema dostupne pretplate', 'No Subsectors currently registered': 'Trenutno nema registrovanih podsektora', 'No Suppliers currently registered': 'Trenutno nema registrovanih dobavljača', 'No Support Requests currently registered': 'Trenutno nema registrovanih Zahtjeva za podršku', 'No Survey Answers currently entered.': 'Nema trenutno unesenih odgovora na ankete', 'No Survey Answers currently registered': 'Trenutno nema registrovanih anketnih odgovora', 'No Survey Questions currently registered': 'Trenutno nema registrovanih anketnih pitanja', 'No Survey Sections currently registered': 'Trenutno nema registrovanih anketnih odjela', 'No Survey Series currently registered': 'Nema registrovanih Serija Anketa', 'No Survey Template currently registered': 'Trenutno nema registrovanog šablona za anketu', 'No Symbologies currently defined': 'Trenutno nema definisanih značenja simbola', 'No Symbologies currently defined for this Layer': 'Nema trenutno definisanih značenja simbola za ovaj sloj', 'No Sync': 'Bez sinhronizacije', 'No sync permitted!': 'Sinhronizacija nije dozvoljena', 'No synchronization': 'Bez sinhronizacije', 'No tags currently defined': 'Trenutno nema definisanih oznaka', 'No Tasks Assigned': 'Nema dodijeljenih zadataka', 'No tasks currently assigned': 'Trenutno nema dodijeljenih zadataka', 'No tasks currently registered': 'Nema trenutno registriranih zadataka', 'No Tasks currently registered in this event': 'Nijedan zadatak trenutno nije registrovan u ovaj događaj', 'No Tasks currently registered in this incident': 'Trenutno nema zadataka registrovanih u ovom incidentu', 'No Tasks currently registered in this scenario': 'Trenutno nema registrovanih zadataka u ovom scenariju', 'No Tasks with Location Data': 'Nema Zadataka sa Podacima o Lokaciji', 'No Teams currently registered': 'Trenutno nema registrovanih timova', 'No template found!': 'Nema šablona pronađenog!', 'No Template Sections': 'Nema odjeljaka predložaka', 'No Themes 
currently defined': 'Nijedna Tema nije trenutno definisana', 'No Themes currently registered': 'Trenutno nema registrovanih tema', 'No Themes found for this Activity': 'Nema nađenih tema za ovu aktivnost', 'No Themes found for this Project': 'Nema nađenih tema za ovaj projekat', 'No Themes found for this Project Location': 'Nema nađenih tema za ovu lokaciju projekta', 'No Tickets currently registered': 'Trenutno nema registrovanih kartica', 'No Time Logged': 'Nema zabilježenog vremena', 'No Tours currently registered': 'Trenutno nema registrovanih tura', 'No Tracks currently available': 'Trenutno nema dostupnih zapisa', 'No translations exist in spreadsheet': 'Ne postoje prijevodi u ovoj tablici', 'No Tweets Available.': 'Nema dostupnih Tweet', 'No Tweets currently in InBox': 'Trenutno nema Tweet u ulaznom sandučetu', 'No Tweets currently in Outbox': 'Trenutno nema Tweet u izlaznom sandučetu', 'No Twilio Settings currently defined': 'Nema trenutno definisanih Twilio postavki', 'No Units currently registered': 'Trenutno nema regitrovanih jedinica', 'No units currently registered': 'Trenutno nema regitrovanih jedinica', 'No Users currently registered': 'Nema trenutno registrovanih korisnika', 'No users have taken a tour': 'Nijedan korisnik nije uzeo turu', 'No users with this role at the moment.': 'Nema korisnika s ovom ulogom u datom trenutku.', "No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": 'Nema pomaka od griničkog vremena. Molim navedite UTC pomak u korisničkim profilima. 
Primjer: UTC+0530', 'No Vehicle Details currently defined': 'Trenutno nisu definisani detalji vozila', 'No Vehicle Types currently registered': 'Trenutno nema registrovanih tipova vozila', 'No Vehicles currently assigned to this incident': 'Vozila nisu dodijeljena ovom incidentu', 'No Vehicles currently registered': 'Nema registrovanih vozila', 'No volunteer availability registered': 'nema registirane dostupnosti volontera', 'No Volunteer Cluster Positions': 'Nema pozicija skupa volontera', 'No Volunteer Cluster Types': 'Nema tipova skupa volontera', 'No Volunteer Clusters': 'Nema skupova volontera', 'No volunteer information registered': 'nema registiranih informacija o volonterima', 'No Volunteers currently registered': 'Trenutno nema registrovanih volontera', 'No Warehouse Items currently registered': 'Nema registrovanih stavki skladišta', 'No Warehouses currently registered': 'Trenutno nema registrovanih skladišta', 'No Warehouses match this criteria': 'Nema skladišta koja odgovaraju ovom kriteriju', 'non-critical': 'ne-kritično', 'Non-medical Staff': 'Nemedicinsko osoblje', 'Non-structural Hazards': 'Nestrukturne opasnosti', 'None': 'Nema', 'none': 'Nijedno', 'None (no such record)': 'Nijedan (ne postoji takav zapis)', 'None of the above': 'Ništa od noga', 'Noodles': 'Tjestenine', 'Normal': 'Normalan', 'normal': 'normalno', 'Normal food sources disrupted': 'Ukobičajen izvor hrane ometan', 'Normal Job': 'Normalni posao', 'Northern Cyprus': 'Sjeverni Kipar', 'Norway': 'Norveška', 'Nose, Angle': 'Nos, ugao', 'Nose, Curve': 'Nos, krivulja', 'Nose, shape': 'Nos, oblik', 'Nose, size': 'Nos, veličina', 'not accessible - no cached version available!': 'Nije dostupno- nema dostupne cache verzije!', 'not accessible - using cached version from': 'nije dostupno - koristi se cache verzija forme', 'Not allowed to Donate without matching to a Request!': 'Nije dopušteno donirati što nije usaglašeno s zahtjevom', 'Not Applicable': 'Nije primjenjivo', 'not applicable': 'nije 
primjenjivo', 'Not authorised!': 'Nije odobreno!', 'Not Authorised!': 'Nije dopušteno!', 'Not installed or incorrectly configured.': 'Nije instalirano ili nije pravilno konfigurirano', 'not needed': 'nije potrebno', 'Not Parsed': 'Nije još analizirano', 'Not Possible': 'Nije moguće', 'Not Set': 'Nije postavljeno', 'not specified': 'nenavedeno', 'Not Started': 'Nije započelo', 'not writable - unable to cache GeoRSS layers!': 'nemoguće pisati - nije moguće čuvati GeoRSS slojeve!', 'not writable - unable to cache KML layers!': 'nemoguće pisati - nije moguće čuvati KML slojeve!', 'Not yet a Member of any Group': 'Još nije član ni jedne grupe', 'Not yet a Member of any Team': 'Još nije član ni jednog tima', 'Note': 'Bilješka', 'Note added': 'Napomena dodana', 'Note Details': 'Detalji bilješke', 'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.': 'Primijetite da ova lista prikazuje samo aktivne volontere. Da biste vidjeli sve ljude registrirane u sistemu, pretražite sa ovog ekrana.', 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Primijetite da ova lista prikazuje samo aktivne volontere. 
Da biste vidjeli sve ljude registrirane u sistemu, pretražite sa ovog ekrana', 'Note that when using geowebcache, this can be set in the GWC config.': 'Primijetite da kada se koristi geowebcache, ovo se može postaviti u GWC konfiguraciji.', 'Note Type': 'Vrsta bilješke', 'Note updated': 'Bilješka ažurirana', 'Note: Make sure that all the text cells are quoted in the csv file before uploading': 'Napomena: Obezbijedite da su sve ćelije teksta pod navodnicima u CSV datoteci prije postavljanja', 'Notes': 'Bilješke', 'Notice to Airmen': 'Bilješka za avijatičare', 'Notification frequency': 'Učestanost informisanja', 'Notification method': 'Metod informisanja', 'Notify': 'Informiši', 'num Zoom Levels': 'broj nivoa uvećanja', 'Number': 'Broj', 'Number of Activities': 'Broj aktivnosti', 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Broj dodatnih ležaja tog tipa će, prema očekivanjima, biti dostupan u ovoj jedinici tokom sljedeća 24 sata.', 'Number of alternative places for studying': 'Broj alternativnih mjesta za studiranje', 'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Broj dostupnih kreveta tog tipa u toj jedinici za vrijeme podnošenja izvještaja', 'Number of Barges': 'Broj šlepova', 'Number of Beneficiaries': 'Broj korisnika', 'Number of bodies found': 'Broj tijela pronađen', 'Number of Columns': 'Broj kolona', 'Number of Completed Assessment Forms': 'Broj završenih formulara ocjene', 'Number of deaths during the past 24 hours.': 'Broj smrtnih slučajeva u posljednja 24 sata.', 'Number of Disasters': 'Broj katastrofa', 'Number of discharged patients during the past 24 hours.': 'Broj otpuštenih pacijenata iz bolnice u posljednjih 24h.', 'Number of doctors': 'Broj doktora', 'Number of doctors actively working': 'Broj ljekara aktivno zaposlenih', 'Number of Facilities': 'Broj objekata', 'Number of houses damaged, but usable': 'Broj kuća oštećenih ali upotrebljivih', 
'Number of houses destroyed/uninhabitable': 'Broj kuća uništenih/neuseljivih', 'Number of in-patients at the time of reporting.': 'Broj pacijenata u bolnici u vrijeme izvještavanja.', 'Number of Incidents': 'Broj incidenata', 'Number of Items': 'Broj stavki', 'Number of items': 'Broj stavki', 'Number of midwives actively working': 'Broj medicinskih babica aktivno zaposlenih', 'Number of newly admitted patients during the past 24 hours.': 'Broj novih primljenih pacijanata u posljednja 24 sata.', 'Number of non-medical staff': 'Broj osoblja koji nisu ukljuceni u zdravstvo', 'Number of nurses': 'Broj medicinskih sestara', 'Number of nurses actively working': 'Broj medicinskih sestara aktivno zaposlenih', 'Number of Patients': 'Broj pacijenata', 'Number of People Affected': 'Broj ljudi na koje je događaj uticao', 'Number of People Dead': 'Broj umrlih ljudi', 'Number of People Injured': 'Broj povrijeđenih ljudi', 'Number of People Required': 'Broj potrebnih ljudi', 'number of planes': 'broj aviona', 'Number of private schools': 'Broj privatnih škola', 'Number of public schools': 'Broj javnih školskih ustanova', 'Number of religious schools': 'Broj religijskih škola', 'Number of residential units': 'Broj stambenih jedinica', 'Number of residential units not habitable': 'Broj neuseljivih prebivališnih jedinica', 'Number of Resources': 'Broj resursa', 'Number of Responses': 'Broj odgovora', 'Number of Rows': 'Broj Redova', 'Number of schools damaged but usable': 'Ukupni broj škola oštećenih ali upotrebljivih', 'Number of schools destroyed/uninhabitable': 'Broj škola uništenih/neuseljivih', 'Number of schools open before disaster': 'Broj škola otvoren prije katastrofe', 'Number of schools open now': 'Broj škola trenutno otvorenih', 'Number of teachers affected by disaster': 'Broj nastavnika ugroženih zbog katastrofe?', 'Number of teachers before disaster': 'Broj nastavnika prije katastrofe', 'Number of Tugboats': 'Broj skela', 'Number of vacant/available beds in this 
facility. Automatically updated from daily reports.': 'Broj slobodnih/dostupnih kreveta na ovoj lokaciji. Automatski ažurirano iz dnevnih izvještaja', 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Broj praznih/slobodnih kreveta u ovoj bolnici. Automatski ažurirano iz dnevnih izvještaja.', 'Number of vacant/available units to which victims can be transported immediately.': 'Broj praznih/dostupnih jedinica u koje žrtve mogu biti odmah transportovane.', 'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Broj ili kod koji se koristi da bi se oznacilo mjesto nalazišta, npr. oznaka zastave, koordinate, referentni broj položaja ili slično (ako je dostupno)', 'Number or Label on the identification tag this person is wearing (if any).': 'Broj ili obilježje na identifikacijskoj oznaci koju osoba nosi (ukoliko postoji).', 'Number/Percentage of affected population that is Female & Aged 0-5': 'Broj/Procent pogođene populacije koju čine žene od 0-5 godina', 'Number/Percentage of affected population that is Female & Aged 13-17': 'Broj/procenat pogođene ženske populacije starosti od 13 do 17 godina', 'Number/Percentage of affected population that is Female & Aged 18-25': 'Broj/Procenat pogođene populacije koja je ženskog spola i starosti 18-25', 'Number/Percentage of affected population that is Female & Aged 26-60': 'Broj/procenat pogođene ženske populacije od 26-60 godina', 'Number/Percentage of affected population that is Female & Aged 6-12': 'Broj/postotak pogođene ženske populacije starosne dobi 6-12 godina', 'Number/Percentage of affected population that is Female & Aged 61+': 'Broj/Procenat pogođene populacije koja je ženskog pola i starosti 61+ godina', 'Number/Percentage of affected population that is Male & Aged 0-5': 'Broj / postotak zahvaćenog stanovništva koji su muškarci i godina 0-5', 'Number/Percentage of affected population that is 
Male & Aged 13-17': 'Broj/postotak pogođene muške populacije starosne dobi od 13-17 godina', 'Number/Percentage of affected population that is Male & Aged 18-25': 'Broj/ Procent zahvaćene populacije muškog spola u dobi od 18-25', 'Number/Percentage of affected population that is Male & Aged 26-60': 'Broj/Procent pogođenog stanovništva kojeg čine muškarci od 26-60 godina starosti', 'Number/Percentage of affected population that is Male & Aged 6-12': 'Broj/Postotak zahvaćenih muskih osoba izmedu 6 i 12 godina', 'Number/Percentage of affected population that is Male & Aged 61+': 'Broj/procenat muške populacije koja je povrijeđena i imaju 61 i više godina', 'Numbers Only': 'Samo brojevi', 'Numeric': 'Brojčano', 'Nurse': 'Medicinska sestra', 'Nursery Beds': 'Kreveti u jaslicama', 'Nursing Information Manager': 'Upravljanje informacijama o medicinskoj njezi', 'Nutrition': 'Prehrana', 'Nutrition problems': 'Problemi u ishrani', 'NZSEE Level 1': 'NZSEE Nivo 1', 'NZSEE Level 2': 'NZSEE Nivo 2', 'Object': 'Objekat', 'Objectives': 'Ciljevi', 'Observer': 'Posmatrač', 'Obsolete': 'Zastarjelo', 'obsolete': 'zastario', 'Obstetrics/Gynecology': 'Porodilište/Ginekologija', 'OCR Form Review': 'Pregled OCR forme', 'OCR module is disabled. Ask the Server Administrator to enable it.': 'OCR modul je isključen. 
Pitajte serverskog administratora da ga omogući.', 'OCR review data has been stored into the database successfully.': 'Podaci za OCR pregled su uspješno stavljeni u bazu podataka', 'Office': 'Kancelarija', 'Office added': 'Kancelarija dodana', 'Office Address': 'Adresa kancelarije', 'Office deleted': 'Kancelarija obrisan', 'Office Details': 'Detalji o kancelariji', 'Office Phone': 'Službeni telefon', 'Office Type': 'Tip kancelarije', 'Office Type added': 'Dodan tip kancelarije', 'Office Type deleted': 'Obrisan tip kancelarije', 'Office Type Details': 'Detalji tipa kancelarije', 'Office Type updated': 'Ažuriran tip kancelarije', 'Office Types': 'Tipovi kancelarija', 'Office updated': 'Kancelarija ažurirana', 'Offices': 'Kancelarije', 'Offices & Warehouses': 'Uredi i skladišta', 'Offline Sync': 'Vanmrežna sinhronizacija', 'Offline Sync (from USB/File Backup)': 'Vanmrežna Sinhronizacija (sa USB-a/Pomoćnih Dokumenata)', 'Oil Terminal Depth': 'Dubina naftnog terminala', 'Old': 'Star', 'Older people as primary caregivers of children': 'Stariji ljudi, kao primarni staratelji djece', 'Older people in care homes': 'Stariji ljudi u domovima', 'Older people participating in coping activities': 'Stariji ljudi koji učestvuju u aktivnostima prilagođavanja', 'Older people with chronical illnesses': 'Stariji ljudi s hroničnim bolestima', 'Older person (>60 yrs)': 'Starije osobe (preko 60 god.)', 'on': 'uključeno', 'on %(date)s': 'na %(date)s', 'On by default?': 'Uključeno prema podrazumijevanoj vrijednosti?', 'On by default? (only applicable to Overlays)': 'Automatski postavljeno na uključeno? 
(Jedino se može primjeniti na Preglede)', 'On Hold': 'Na čekanju', 'On Order': 'U narudžbi', 'On Scene': 'Na sceni', 'On-site Hospitalization': 'Hospitalizacija na licu mjesa', 'once': 'jednom', 'One time cost': 'Jednokratna cijena', 'One Time Cost': 'Jednokratni trošak', 'One-time': 'Jedanput', 'One-time costs': 'Jednokratni troškovi', "Only Categories of type 'Asset' will be seen in the dropdown.": "Samo kategorije tipa 'Sredstvo' će se vidjeti u padajućoj listi.", "Only Categories of type 'Vehicle' will be seen in the dropdown.": "Samo kategorije tipa 'Vozilo' će se vidjeti u padajućoj listi.", "Only Items whose Category are of type 'Vehicle' will be seen in the dropdown.": "Samo stavke čija je kategorija tipa 'Vozilo' će se vidjeti u padajućoj listi.", 'Only showing accessible records!': 'Prikazujem samo pristupačne zapise!', 'Only use this button to accept back into stock some items that were returned from a delivery to beneficiaries who do not record the shipment details directly into the system': 'Koristite ovo dugme za prihvatanje nazad u zalihu stavki koje su vraćene iz isporuke korisnjicima koji nisu zabilježili detalje o isporuci direktno u sistem', 'Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system': 'Koristite ovo dugme da potvrdite da je pošiljka stigla na odredište bez bilježenja pošiljke direktno u sistem', 'Oops! something went wrong on our side.': 'Oops! Nešto je krenulo po zlu.', 'Oops! Something went wrong...': 'Ups! 
Nešto nije u redu...', 'Opacity': 'Neprozirnost', 'Opacity (1 for opaque, 0 for fully-transparent)': 'Neprozirnost (1 za neprozirno, 0 potpuno providno)', 'Open': 'Otvori', 'Open area': 'Otvoreno područje', 'Open Chart': 'Otvori dijagram', 'open defecation': 'otvorena defekacija', 'Open in New Tab': 'Otvori u novoj kartici', 'Open Incidents': 'Otvori incidente', 'Open Map': 'Otvori mapu', 'Open recent': 'Otvorni skorašnje', 'Open Report': 'Otvori izvještaj', 'Open Table': 'Otvori tabelu', 'Open Tasks for %(project)s': 'Otvoreni zadaci za %(project)s', 'Open Tasks for Project': 'Otvoreni zadaci za projekt', 'Opening Times': 'Radna vremena', 'OpenStreetMap Layer': 'OpenStreetMap sloj', 'OpenStreetMap OAuth Consumer Key': 'OpenStreetMap OAuth korisnički ključ', 'OpenStreetMap OAuth Consumer Secret': 'OpenStreetMap OAuth korisnička tajna lozinka', 'OpenWeatherMap Layer': 'OpenWeatherMap sloj', 'Operating Rooms': 'Operacione sale', 'Operational': 'Operativno', 'Opportunities to Volunteer On-Site?': 'Prilike za volontera na lokaciji?', 'Opportunities to Volunteer Remotely?': 'Prilike za volotera za udaljeni rad?', 'Option': 'Opcija', 'Option Other': 'Opcija druga', 'Optional': 'Neobavezno', 'optional': 'opcionalno', 'Optional link to an Incident which this Assessment was triggered by.': 'Izborni link na incident koji je potaknuo ovu procjenu.', 'Optional password for HTTP Basic Authentication.': 'Opcionalna lozinka za HTTP osnovnu autentifikaciju', 'Optional selection of a background color.': 'Izbor boje pozadine', 'Optional selection of a MapServer map.': 'Opcionalan izbor MapSever karte.', 'Optional selection of an alternate style.': 'Neobavezna selekcija alternativnog stila', 'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Neobavezni predmet za staviti u Email - može biti korišten kao sigurnosna šifra od strane pružatelja usluga.', 'Optional username for HTTP Basic Authentication.': 'Opcionalno ime korisnika za HTTP 
osnovnu autentifikaciju', 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Opcionalno. Ako želite stilizirati karakteristike na osnovu vrijednosti atributa, ovdje izaberite atribute koje ćete koristiti.', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je imenski prostor (ne ime!) radnog prostora URI. U sklopu WFS getCapabilities, radni prostor je FeatureType ime prije dvotačke (:).', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je URI imenskog prostora (ne ime!) . U sklopu WFS getCapabilities, ovo je dio tipa osobine prije dvotačke (:).', 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je imenski prostor radnog prostora URI. U sklopu WFS getCapabilities, ovo je dio tipa osobine prije dvotačke (:).', 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Izborno. Naziv elementa čiji sadržaji trebaju biti URL slikovne datoteke stavljene u Popup-e.', 'Optional. The name of an element whose contents should be put into Popups.': 'Opcionalno. Ime elementa čiji sadržaj bi trebao biti unutar iskočnih prozora.', "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Opcionalno. Ime geometrijske kolone. U postGIS ovo je automatski 'the_geom'.", 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Opcionalno. Ime šeme. 
Na Geoserveru ovo ima formu http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.', 'Options': 'Opcije', 'or': 'ili', 'Or add a new language code': 'Ili dodaj novu šifru jezika', 'or import from csv file': 'ili uvoz iz CSV dokumenta', 'OR Reason': 'OR razlog', 'OR Status Reason': 'OR Status razlog', 'Order': 'Narudžba', 'Order canceled': 'Narudžba otkazana', 'Order Created': 'Narudžba kreirana', 'Order Details': 'Detalji narudžbe', 'Order Due %(date)s': 'Rok narudžbe %(date)s', 'Order Item': 'Stavka narudžbe', 'Order updated': 'Narudžba ažurirana', 'Ordered list ... (#TODO [String])': 'Uerđena lista ... (#TODO [String])', 'Orders': 'Narudžbe', 'Organization': 'Organizacija', 'Organization added': 'Dodana organizacija', 'Organization added to Policy/Strategy': 'Organizacija dodana u politiku/strategiju', 'Organization added to Project': 'Organizacija dodana u projekt', 'Organization deleted': 'Obrisana organizacija', 'Organization Details': 'Detalji organizacije', 'Organization Domain added': 'Domena organizacije dodana', 'Organization Domain deleted': 'Domena organizacije obrisana', 'Organization Domain Details': 'Detalji domene organizacije', 'Organization Domain updated': 'Domena organizacije ažurirana', 'Organization Domains': 'Domene organizacije', 'Organization Group': 'Grupa organizacija', 'Organization group': 'Grupe organizacija', 'Organization Needs': 'Potrebe organizacije', 'Organization Needs added': 'Potrebe organizacije dodane', 'Organization Needs deleted': 'Potrebe organizacije obrisane', 'Organization Needs updated': 'Potrebe organizacije ažurirane', 'Organization Registry': 'Registar Organizacija', 'Organization removed from Policy/Strategy': 'Organizacija uklonjena iz politike/strategije', 'Organization removed from Project': 'Organizacija uklonjena sa projekta', 'Organization Type': 'Tip organizacije', 'Organization Type added': 'Vrsta organizacije dodana', 'Organization Type deleted': 'Vrsta 
organizacije obrisana', 'Organization Type Details': 'Detalji tipa organizacije', 'Organization Type updated': 'Vrsta organizacije ažurirana', 'Organization Types': 'Tipovi organizacije', 'Organization Units': 'Organizacione jedinice', 'Organization updated': 'Ažurirana organizacija', 'Organization(s)': 'Organizacije', 'Organization/Branch': 'Organizacija/Ogranak', 'Organization/Supplier': 'Organizacija/dobavljač', 'Organization:': 'Organizacija:', 'Organizations': 'Organizacije', 'Organizations / Teams / Facilities': 'Organizacije / Timovi / Objekti', 'Organized By': 'Organizovao', 'Origin': 'Porijeklo', 'Origin of the separated children': 'Porijeklo odvojene djece', 'Original': 'Izvorno', 'Original Quantity': 'Izvorna količina', 'Original Value per Pack': 'Izvorna vrijednost po paketu', 'OSM file generation failed!': 'Generisanje OSM datoteke neuspjelo!', 'OSM file generation failed: %s': 'Generisanje OSM datoteke neuspjelo: %s', 'Other': 'Ostalo', 'other': 'drugo', 'Other (describe)': 'Ostalo (opis)', 'Other (specify)': 'Ostalo (navedi)', 'Other activities of boys 13-17yrs': 'Ostale aktivnosti dječaka od 13-17 godina', 'Other activities of boys 13-17yrs before disaster': 'Druge aktivnosti dječaka uzrasta od 13 do 17 godina, prije nesreće', 'Other activities of boys <12yrs': 'Druge aktivnosti dječaka mlađih od 12 godina', 'Other activities of boys <12yrs before disaster': 'Druge aktivnosti dječaka <12godina prije nesreće', 'Other activities of girls 13-17yrs': 'Druge aktivnosti djevojčiva 13-17 godina', 'Other activities of girls 13-17yrs before disaster': 'Ostale aktivnosti djevojčica 13-17 godina prije katastrofe', 'Other activities of girls<12yrs': 'Druge aktivnosti djevojčica < 12godina', 'Other activities of girls<12yrs before disaster': 'Ostale aktivnosti djevojčica <12 godina prije katastrofe', 'Other Address': 'Druga Adresa', 'Other alternative infant nutrition in use': 'Druga alternativa u prehrani dojenčadi u upotrebi', 'Other alternative places for 
study': 'Alternativna mjesta za učenje', 'Other assistance needed': 'Potrebna druga pomoć', 'Other assistance, Rank': 'Ostale vrte pomoći, Poredak', 'Other current health problems, adults': 'Ostali aktuelni zdravstveni problemi odraslih', 'Other current health problems, children': 'Drugi trenutni zdravstveni problemi, djeca', 'Other Details': 'Drugi detalji', 'Other events': 'Drugi događaji', 'Other Evidence': 'Ostali dokazi', 'Other factors affecting school attendance': 'Drugi faktori koji utiču na pohađanje škole', 'Other Faucet/Piped Water': 'Druga voda iz slavine/vodovoda', 'Other Inventories': 'Druge zalihe', 'Other Isolation': 'Druge izolacije', 'Other major expenses': 'Drugi veći troškovi', 'Other Name': 'Drugo ime', 'Other non-food items': 'Ostali neprehrambeni artikli', 'Other recommendations': 'Ostale preporuke', 'Other residential': 'Ostala prebivališta', 'Other school assistance received': 'Druga nastavna pomoć primljena', 'Other school assistance, details': 'Asistencija ostalih škola, detalji', 'Other school assistance, source': 'Ostale pomoći u školi, izvor', 'Other settings can only be set by editing a file on the server': 'Ostale postavke mogu biti postavljene jedino uređivanjem datoteke na serveru', 'Other side dishes in stock': 'ostali prilozi u zalihama', 'Other types of water storage containers': 'Drugi tipovi spremnika za vodu', 'Other Users': 'Drugi korisnici', 'Other ways to obtain food': 'Drugi načini za dobavu hrane', 'Others': 'Ostali', 'Out': 'Van', 'Outbound Mail settings are configured in models/000_config.py.': 'Poruke koje se salju su konfigurisane u models/000_config.py.', 'Outbox': 'Za slanje', 'Outcomes, Impact, Challenges': 'Izlazi, utjecaj izazovi', 'Outgoing SMS handler': 'Rukovodilac odlaznih SMS poruka', 'Outgoing SMS Handler': 'Upravljač izlaznog SMS', 'Output': 'Izlaz', 'Output added': 'Izlaz dodan', 'Output deleted': 'Izlaz uklonjen', 'Output updated': 'Izlaz ažuriran', 'Outputs': 'Izlazi', 'oval': 'ovalno', 'over one 
hour': 'preko sat', 'Overall Hazards': 'Ukupni rizici', 'Overall status of the clinical operations.': 'Ukupni status kliničkih operacija.', 'Overall status of the facility operations.': 'Ukupni status operacija objekta', 'Overhead falling hazard': 'Opasnost od predmeta koji padaju s visine', 'Overland Flow Flood': 'Poplava kopnenog toka', 'Overlays': 'Preklopi', 'Overview': 'Pregled', 'Owned By (Organization/Branch)': 'Vlasnik (organizacija/grana)', 'Owned Records': 'Broj zapisa u vlasništvu', 'Owned Resources': 'Posjedovani resursi', 'Ownership': 'Vlasništvo', 'Owning Organization': 'Vlasnička organizacija', 'Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only': 'Okvir pacifičkih ostrva za akcije o klimatskim promjenama. Primjenjivo samo na projekte u pacifičkim zemljama', 'Pack': 'Paket', 'pack of 10': 'Paket od 10', 'Packet': 'Paket', 'Packs': 'Paketi', 'Page': 'Stranica', 'painted': 'nacrtano', 'Pan Map: keep the left mouse button pressed and drag the map': 'Prevlačenje mape: držite lijevo dugme miša pritisnuto i vucite mapu', 'Papua New Guinea': 'Papua Nova Gvineja', 'Paraguay': 'Paragvaj', 'Parameters': 'Parametri', 'Parapets, ornamentation': 'Parapeti, ukrašavanja', 'Parent': 'Roditelj', 'Parent Item': 'Nadređena stavka', "Parent level should be higher than this record's level. Parent level is": 'Roditeljski nivo bi trebao biti viši od nivoa ovog zapisa. 
Roditeljski nivo je', 'Parent needs to be of the correct level': 'Roditelj treba da bude na odgovarajućoj razini', 'Parent needs to be set': 'Roditelj treba biti postavljen', 'Parent needs to be set for locations of level': 'Roditelj treba bit postavljen na lokacijama nivoa', 'Parent Office': 'Matični ured', 'Parents/Caregivers missing children': 'Roditelji/Staratelji djece koja su nestala', 'Parking Area': 'Parking područje', 'Parking/Tarmac Space Capacity': 'Kapacitet parking prostora', 'Parking/Tarmac Space Units': 'Jedinice parking prostora', 'Parse': 'Raščlani', 'Parsed': 'Raščlanjeno', 'Parser connected': 'Povezano kroz parser', 'Parser Connection Details': 'Detalji parserske konekcije', 'Parser connection removed': 'Uklonjena parserska konekcija', 'Parser connection updated': 'Parserska konekcija je ažurirana', 'Parser Connections': 'Parserske konekcije', 'Parsing Settings': 'Postavke parsera', 'Parsing Status': 'Status parsera', 'part': 'dio', 'Part of the URL to call to access the Features': 'Dio URL koji se zove za pristup objektima', 'Partial': 'Djelimično', 'Partial Database Synchronization': 'Djelomična sinhronizacija sa bazom podataka', 'Participant': 'Učesnik', 'Participant added': 'Učesnik dodan', 'Participant deleted': 'Učesnik obrisan', 'Participant Details': 'Detalji učesnika', 'Participant updated': 'Učesnik ažuriran', 'Participants': 'Učesnici', 'Partner added': 'Partner dodan', 'Partner deleted': 'Partner izbrisan', 'Partner Details': 'Detalji partnera', 'Partner Organization added': 'Organizacija partnera dodana', 'Partner Organization deleted': 'Organizacija partnera obrisana', 'Partner Organization Details': 'Detalji partnersk organizacije', 'Partner Organization updated': 'Organizacija partnera ažurirana', 'Partner Organizations': 'Partnerske organizacije', 'Partner updated': 'Partner ažuriran.', 'Partners': 'Partneri', 'Pashto': 'Pašto', 'Pass': 'Prolaz', 'Passport': 'Pasoš', 'Password': 'Lozinka', "Password fields don't match": 'Polja za 
lozinku se ne podudaraju', 'Password to use for authentication at the remote site.': 'Lozinka za prijavu na udaljeni sajt-', 'Path': 'Putanja', 'Pathology': 'Patologija', 'Patient': 'Pacijent', 'Patient added': 'Pacijent dodan', 'Patient deleted': 'Pacijent obrisan', 'Patient Details': 'Detalji o pacijentu', 'Patient Tracking': 'Praćenje pacijenta', 'Patient Transportation Ambulance': 'Ambulanta za transport pacijenata', 'Patient updated': 'Pacijent ažuriran', 'Patients': 'Pacijenti', 'PDF File': 'PDF datoteka', 'Pediatric ICU': 'Pedijatrijska intenzivna njega', 'Pediatric Psychiatric': 'Pedijatrijsko psihijatrijsko', 'Pediatrics': 'Doktor za bolesti i povrijede djece i maloljetnika', 'Peer': 'Saradnik', 'Peer added': 'Saradnik dodan', 'Peer deleted': 'Saradnik obrisan', 'Peer Details': 'Detalji o saradniku', 'Peer not allowed to push': 'Nije dozvoljeno gurati saradnika', 'Peer Registration': 'Registracija saradnika', 'Peer Registration Details': 'Detalji registracije saradnika', 'Peer Registration Request': 'Zahtjev za registraciju saradnika', 'Peer registration request added': 'Dodat zahtjev za registraciju saradnika', 'Peer registration request deleted': 'Zahtjev za registracijom saradnika je obrisan', 'Peer registration request updated': 'Zahtjev za registraciju saradnika ažuriran', 'Peer Type': 'Tip saradnika', 'Peer UID': 'UID saradnika', 'Peer updated': 'Ažuriran saradnik', 'Peers': 'Saradnici', 'pending': 'čeka', 'Pending': 'U toku', 'Pending Requests': 'Zahtjevi na čekanju', 'people': 'ljudi', 'People': 'Ljudi', 'People added to Commitment': 'Ljudi dodani u zaduženje', 'People Needing Food': 'Ljudi koji trebaju hranu', 'People Needing Shelter': 'Ljudi kojima je potrebno sklonište', 'People Needing Water': 'Ljudi koji trebaju vodu', 'People removed from Commitment': 'Osobe odstranjene iz zaduženja', 'People Trapped': 'Zarobljeni ljudi', 'People with chronical illnesses': 'Ljudi s hroničnim bolestima', 'per': 'po', 'Percentage': 'Procenat', 'Performance 
Rating': 'Ocjena izvedbe', 'Permanent Home Address': 'Stalna kućna adresa', 'Person': 'Osoba', 'Person 1': 'Osoba broj 1', 'Person 1, Person 2 are the potentially duplicate records': 'Osoba 1, Osoba 2 su mogući dupli zapisi', 'Person 2': 'Osoba 2', 'Person added': 'Osoba dodana', 'Person added to Commitment': 'Osoba dodana zaduženju', 'Person added to Group': 'Osoba dodana u grupu', 'Person added to Team': 'Osoba dodana u tim', 'Person Data': 'Lični podaci', 'Person De-duplicator': 'Deduplikator osoba', 'Person deleted': 'Osoba obrisana', 'Person Details': 'Detalji osobe', 'Person details updated': 'Detalji o osobi ažurirani', 'Person Entity': 'Jedinka osobe', 'Person Finder': 'Nalazač osoba', 'Person found': 'Osoba pronađena', 'Person interviewed': 'Osoba ispitana', 'Person Management': 'Upravljanje osobljem', 'Person missing': 'Nedostaje osoba', 'Person must be specified!': 'Osoba se mora navesti!', 'Person or OU': 'Osoba ili OJ', 'Person Registry': 'Registar osoba', 'Person removed from Commitment': 'Osoba odstranjena iz zaduženja', 'Person removed from Group': 'Osoba odstranjena iz grupe', 'Person removed from Team': 'Osoba odstranjena iz tima', 'Person reporting': 'Osoba koja je prijavila', 'Person Transportation Tactical Vehicle': 'Lično transportno taktičko vozilo', 'Person updated': 'Osoba ažurirana', 'Person who has actually seen the person/group.': 'Osoba koja je zapravo vidjela osobu/grupu.', "Person's Details": 'Detalji osobe', "Person's Details added": 'Detalji o osobi dodani', "Person's Details deleted": 'Detalji o osobi obrisani', "Person's Details updated": 'Detalji o osobi ažurirani', 'Person.': 'Osoba', 'Person/Group': 'Osoba/Grupa', 'Personal': 'Lično', 'Personal Data': 'Lični podaci', 'Personal Effects': 'Lični efekti', 'Personal Effects Details': 'Detalji ličnih uticaja', 'Personal impact of disaster': 'Lični utjecaj katastrofe', 'Personal Map': 'Lična mapa', 'Personal Profile': 'Lični profil', 'Persons': 'Osobe', 'Persons in institutions': 
'Osobe u institucijama', 'Persons per Dwelling': 'Osoba po stambenoj jedinici', 'Persons with disability (mental)': 'Osobe sa (mentalnim) invaliditetom', 'Persons with disability (physical)': 'Osobe s invaliditetom (tjelesnim)', "Persons' Details": 'Detalji o osobama', 'Philippines': 'Filipini', 'Phone': 'Telefon', 'Phone #': 'Telefon #', 'Phone 1': 'Telefon 1', 'Phone 2': 'Telefon 2', 'Phone number is required': 'Potreban je telefonski broj', "Phone number to donate to this organization's relief efforts.": 'Telefonski broj na koji se donira pomoć žrtvama ove organizacije.', 'Phone/Business': 'Telefon/ Posao', 'Phone/Emergency': 'Telefon/Hitni', 'Phone/Exchange': 'Telefon/razmjena', 'Phone/Exchange (Switchboard)': 'Telefonska centrala', 'Photo': 'Fotografija', 'Photo added': 'Fotografija dodana', 'Photo deleted': 'Fotografija obrisana', 'Photo Details': 'Detalji o fotografiji', 'Photo Taken?': 'Fotografija napravljena?', 'Photo updated': 'Fotografija ažurirana', 'Photograph': 'Fotografija', 'Photos': 'Fotografije', 'Physical': 'Fizički', 'Physical Description': 'Fizički opis', 'Physical Safety': 'Fizička sigurnost', 'Picture': 'Slika', 'Picture upload and finger print upload facility': 'Mogućnost uploada slike i otiska prsta', 'piece': 'Dio', 'PIFACC Priorities': 'PIFACC Prioriteti', 'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures': 'PIFACC-1: Implementacija realnih mjerenja s adaptacijom na zemljičtu', 'PIFACC-2: Governance and Decision Making': 'PIFACC-2: Vlada i donošenje odluka', 'PIFACC-3: Improving our understanding of climate change': 'PIFACC-3: Poboljšanje razumijevanja klimatskih promjena', 'PIFACC-4: Education, Training and Awareness': 'PIFACC-4: Obrazovanje, obuka i informisanost', 'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions': 'PIFACC-5: Smanjenje globalnog ispuštanja gasova koji izazivaju efekat staklene bašte', 'PIFACC-6: Partnerships and Cooperation': 'PIFACC-6: Partnerstvo i saradnja', 'PIL (Python Image Library) not 
installed': 'PIL (Python Image Library) nije instalirana', 'PIN': 'PIN', 'PIN number': 'PIN broj', 'PIN number ': 'PIN broj - osobni identifikacijski broj ', 'Pipe': 'Cijev', 'pit': 'jama', 'pit latrine': 'septička jama poljskog zahoda', 'PL Women': 'PL žene', 'Place': 'Mjesto', 'Place for solid waste disposal': 'Mjesto za ostavljanje čvrstog smeća', 'Place of Birth': 'Mjesto rođenja', 'Place of find': 'Mjesto pronalaska', 'Place of Recovery': 'Mjesto oporavka', 'Place on Map': 'Mjesto na karti', 'Places for defecation': 'Mjesta za vršenje nužde', 'Places the children have been sent to': 'Mjesta gdje su djeca poslana', 'Planned': 'Planirano', 'Planned %(date)s': 'Planirano %(date)s', 'Planned Procurement': 'Planirana nabava', 'Planned Procurement Item': 'Planirana stavka nabavke', 'Planned Procurements': 'Planirane nabave', 'Playing': 'Izvršava', 'Please choose a type': 'Odaberite tip', "Please come back after sometime if that doesn't help.": 'Molim vratite se nakon nekog vremena, ako to ne pomogne.', 'Please correct all errors.': 'Molim da ispravite sve greške.', 'Please do not remove this sheet': 'Molim da ne brišete ovaj list', 'Please enter a %(site)s': 'Molimo unesite %(site)s', 'Please enter a %(site)s OR an Organization': 'Molimo unesite %(site)s ILI organizaciju', 'Please enter a first name': 'Molimo unesite ime', 'Please enter a last name': 'Molim, unesite prezime', 'Please enter a number only': 'Molim unesite samo broj', 'Please enter a site OR a location': 'Molimo unesite mjesto ILI lokaciju', 'Please enter a valid email address': 'Unesite važeću adresu elektronske pošte', 'Please enter an Organization/Supplier': 'Molimo unesite organizaciju/dobaljača', 'Please enter details of the Request': 'Molim unesite detalje zahtjeva', 'Please enter request details here.': 'Molim unesite detalje zahtjeva ovdje.', 'Please enter the details on the next screen.': 'Molim unesite detalje zahtjeva na sljedećem ekranu.', 'Please enter the first few letters of the 
Person/Group for the autocomplete.': 'Molimo unesite prvih nekoliko slova Osobe/Grupe za automatsko popunjavanje.', 'Please enter the recipient': 'Molimo dodajte primatelja', 'Please enter the recipient(s)': 'Molimo unesite primatelja', 'Please fill this!': 'Molim Vas popunite ovo!', 'Please give an estimated figure about how many bodies have been found.': 'Molimo dajte okvirnu procjenu koliko je tijela pronađeno.', "Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": 'Molim navedite što više detalja, uključujući URL gdje se greška dešava ili želite nove mogućnosti.', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Molimo unesite URL stranice na koju mislite, opis onoga što očekujete da će se desiti i onoga što se ustvari dogodilo', 'Please record Beneficiary according to the reporting needs of your project': 'Molim zapišite korisnmika prema potrebama izvještavanja vašeg projekta', 'Please report here where you are:': 'Molim Vas da ovdje prijavite gdje se nalazite:', 'Please select': 'Označite,molim', 'Please Select a Facility': 'Molim odaberite objekat', 'Please select a valid image!': 'Molim izaberite ispravnu sliku', 'Please select another level': 'Molimo odaberite drugi nivo', 'Please select exactly two records': 'Molim odaberite tačno dva zapisa', 'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Molimo da se prijavite koristeći svoj broj mobilnog telefona, jer nam to omogućava da vam šaljemo SMS poruke. Molimo da napišete kompletan pozivni broj.', 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). 
You may also add suggestions the situation could be improved.': 'Molimo detaljno specificirajte probleme i prepreke sa pravilnim pristupanjem bolesti (u brojevima, gdje je prikladno). Možete također dodati prijedloge gdje bi situacija mogla biti poboljšana.', 'Please use this field to record any additional information, including a history of the record if it is updated.': 'Molimo koristite ovo polje da popunite dodatne informacije, uključujući istorijat zapisa ako je ažuriran.', 'Please use this field to record any additional information, including any Special Needs.': 'Molimo Vas iskorisite ovo polje da snimite dodatne informacije, uključujući bilo kakve specijalne potrebe.', 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Molimo koristite ovo polje da zabilježite dodatne informacije, poput Ushahidi ID instance. Uključite istorijat zapisa ako je ažuriran.', 'Pledge': 'Obećati podršku', 'Pledge Aid': 'Ponudi pomoć', 'Pledge Aid to match these Requests': 'Ponudi pomoć koja je usaglašena s ovim zahtjevima', 'Pledge Status': 'Status ponude pomoći', 'Pledge Support': 'Obećati podršku', 'Pledges': 'Ponude za pomoć', 'PoI': 'Tačka interesa', 'PoI Type added': 'Tip tačke interesa dodan', 'PoI Type deleted': 'Tip tačke interesa obrisan', 'PoI Type Details': 'Detalji o vrsti tačke interesa', 'PoI Type updated': 'Tip tačke interesa ažuriran', 'PoI Types': 'Tipovi tačaka interesa', 'Point': 'Tačka', 'Point of Interest added': 'Tačka interesa dodana', 'Point of Interest deleted': 'Tačka interesa obrisana', 'Point of Interest Details': 'Detalji tačaka interesa', 'Point of Interest updated': 'Tačka interesa ažurirana', 'pointed': 'označeno', 'Points of Interest': 'Tačke interesa', 'PoIs': 'Tačke interesa', 'PoIs successfully imported.': 'Tačke interesa uspješno uvezene.', 'Poisoning': 'Trovanje', 'Poisonous Gas': 'Otrovni gas', 'Poland': 'Poljska', 'Police': 'Policija', 'Policies & 
Strategies': 'Politike ili strategije', 'Policy': 'Pravila', 'Policy or Strategy': 'Politika ili strategija', 'Policy or Strategy added': 'Politika ili strategija dodana', "Policy or Strategy added, awaiting administrator's approval": 'Dodana politika ili strategija, čeka se na potvrdu administratora.', 'Policy or Strategy deleted': 'Politika ili strategija obrisana', 'Policy or Strategy updated': 'Politika ili strategija ažurirana', 'Poll': 'Anketa', 'Pollution and other environmental': 'Zagađenja i druge okolišne', 'Polygon': 'Poligon', 'Polygon reference of the rating unit': 'Poligon referenca jedinice za procjenu', 'Poor': 'Slabo', 'Population': 'Populacija', 'Population and number of households': 'Stanovnistvo i broj domacinstava', 'Population Statistic added': 'Statistika o populaciji dodana', 'Population Statistic deleted': 'Statistika stanovništva izbrisana', 'Population Statistic Details': 'Statističke pojedinosti populacije', 'Population Statistic updated': 'Statistika stanovništva osvježena', 'Population Statistics': 'Statistika o populaciji', 'Popup Fields': 'Popup stavke', 'Popup Label': 'Iskočna oznaka', 'Porridge': 'Kaša', 'Port Closure': 'Zatvaranje luke', 'Portable App': 'Prenosiva aplikacije', 'Portuguese': 'Portugalski', 'Portuguese (Brazil)': 'Portugalski (Brazil)', 'Position': 'Pozicija', 'Position added': 'Pozicija dodana', 'Position Catalog': 'Katalog Položaja', 'Position deleted': 'Pozicija izbrisana', 'Position Details': 'Detalji pozicije', 'Position in tour': 'Položaj na turi', 'Position updated': 'Pozicija ažurirana', 'Positions': 'Položaji', 'Post': 'Ubacivi tekst', 'Post added': 'Ubacivi tekst dodan', 'Post deleted': 'Ubacivi tekst obrisan', 'Post Details': 'Detalji ubacivog teksta', 'Post removed': 'Ubacivi tekst uklonjen', 'Post set as Module/Resource homepage': 'Ubacivi tekst postavljen kao početna stranica modula/resursa', 'Post Tagged': 'Ubacivi tekst označen', 'Post updated': 'Ubacivi tekst ažuriran', 'Post-impact shelterees are 
there for a longer time, so need more space to Sleep.': 'Skloništa poslije nesrećnog utjecaja su ovdje za duže vrijeme, pa je potrebno više prostora za spavanje.', 'Postcode': 'Poštanski broj', 'Posted on': 'Postavljeno', 'postponed': 'odgođeno', 'Posts': 'Ubacivi tekstovi', 'Poultry': 'Perad', 'Poultry restocking, Rank': 'Obnova zaliha peradi, Rang', 'Pounds': 'Funte', 'Power': 'Napajanje', 'Power Failure': 'Nestanak struje', 'Power Outage': 'Nestanak napajanja', 'Power Supply Type': 'Vrsta napajanja', 'Powered by Sahana Eden': 'Omogućeno od strane Sahana Eden', 'Pre-cast connections': 'Konekcije prije emitovanja', 'Preferred Name': 'Preferirano ime', 'Pregnant women': 'Trudnica', 'Preliminary': 'Preeliminarno', 'preliminary template or draft, not actionable in its current form': 'preliminarni šablon ili nacrt, nije kažnjiv u trenutnom obliku', 'Prepare Shipment': 'Pripremi ošiljku', 'Presence': 'Prisustvo', 'Presence Condition': 'stanje prisutnosti', 'Presence Log': 'Zapisnik prisustva', 'Previous': 'Prethodni', 'previous 100 rows': 'prethodnih 100 redova', 'Previous View': 'Prethodni prikaz', 'primary incident': 'primarni incident', 'Primary Name': 'Primarni naziv', 'Primary Occupancy': 'Primarno zanimanje', 'Principal': 'Najvažnije', 'Print': 'Štampa', 'Priority': 'Prioritet', 'Priority from 1 to 9. 1 is most preferred.': 'Prioritet od 1 do 9. 
1 je najviše željen.', 'Priority Level': 'Nivo prioriteta', 'Privacy': 'Privatnost', 'Private': 'Privatno', 'Problem added': 'Problem dodan', 'Problem Administration': 'Upravljanje problemima', 'Problem connecting to twitter.com - please refresh': 'Problem konektovanja na twitter.com - molimo osvježite stranicu', 'problem connecting to twitter.com - please refresh': 'Problem konektovanja na twitter.com - molimo osvježite stranicu', 'Problem deleted': 'Izbrisan problem', 'Problem Details': 'Detalji problema', 'Problem Group': 'Grupa problema', 'Problem Title': 'Naslov problema', 'Problem updated': 'Problem ažuriran', 'Problems': 'Problemi', 'Problems? Please call': 'Problemi? Molim pozovite', 'Procedure': 'Procedura', 'Process Received Shipment': 'Obradi primljenu isporuku', 'Process Shipment to Send': 'Procesiraj pošiljku za slanje', 'Processed with KeyGraph?': 'Obrađeno koristeći KeyGraph', 'Processing': 'Obrada', 'Procured': 'Nabavljeno', 'Product Description': 'Opisproizvoda', 'Profession': 'Profesija', 'Professional Experience': 'Profesionalno iskustvo', 'Professional Experience added': 'Profesionalno iskustvo dodano', 'Professional Experience deleted': 'Profesionalno iskustvo obrisano', 'Professional Experience Details': 'Detalji profesionalnog iskustva', 'Professional Experience updated': 'Profesionalno iskustvo ažurirano', 'Profile': 'Profil', 'Profile Configuration': 'Konfiguracija Profila', 'Profile Configuration removed': 'Konfiguracija profila izbrisana', 'Profile Configuration updated': 'Ažurirana konfiguracija mape', 'Profile Configurations': 'Konfiguracije profila', 'Profile Configured': 'Profil konfigurisan', 'Profile Details': 'Detalji profila', 'Profile Page': 'Stranica profila', 'Profile Picture': 'Slika profila', 'Profile Picture?': 'Slika profila?', 'Profiles': 'Profili', 'Program added': 'Program dodan', 'Program deleted': 'Program obrisan', 'Program Details': 'Detalji programa', 'Program Hours (Month)': 'Programski sati (mjeseci)', 'Program 
Hours (Year)': 'Programski sati (godina)', 'Program updated': 'Program ažuriran', 'Programs': 'Programi', 'Project': 'Projekt', 'Project Activity': 'Aktivnosti projekta', 'Project added': 'Projekat je dodan', 'Project Calendar': 'Kalendar projekta', 'Project deleted': 'Projekat je obrisan', 'Project Details': 'Detalji Projekta', 'Project Framework': 'Radni okvir projekta', 'Project has no Lat/Lon': 'Projekat nema Lat/Lon koordinate', 'Project Management': 'Upravljanje Projektom', 'Project Name': 'Ime projekta', 'Project not Found': 'Projekt nije nađen', 'Project Organization Details': 'Detalji organizacije projekta', 'Project Organization updated': 'Organizacija projekta ažurirana', 'Project Organizations': 'Organizacije projekta', 'Project Report': 'Izvještaj projekta', 'Project Status': 'Status projekta', 'Project Time Report': 'Izvještaj o projektnom vremenu', 'Project Tracking': 'Praćenje projekata', 'Project updated': 'Projekat je ažuriran', 'Projection': 'Projekcija', 'Projection added': 'Projekcija je dodana', 'Projection deleted': 'Projekcija je obrisana', 'Projection Details': 'Detalji projekcije', 'Projection Type': 'Tip projekcije', 'Projection updated': 'Projekcija je ažurirana', 'Projections': 'Projekcije', 'Projects': 'Projekti', 'Projects Map': 'Mapa projekata', 'Prominent Adams apple': 'Uočljiva Adamova jabučica', 'pronounced': 'izgovoreno', 'Property reference in the council system': 'Preporuka vlasništva u sistemu vijeća', 'Proposed': 'Predloženo', 'Protected resource': 'Zaštićeni resurs', 'Protection': 'Zaštita', 'Protocol': 'Protokol', 'Provide a password': 'Obezbijedi lozinku', 'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Osiguravanje dodatnih skica cijele zgrade ili oštećenih tačaka. 
Navesti oštečene tačke.', 'Provide Metadata for your media files': 'Obezbjedi metapodatke za vaše medijske datoteke', 'Province': 'Područje', 'Proxy Server URL': 'URL za posrednički (proxy) server', 'Proxy-server': 'Proxy server', 'Psychiatrics/Adult': 'Psihijatrija/odrasli', 'Psychiatrics/Pediatric': 'Psihijatrija/Pedijatrija', 'Pubic hair, Colour': 'Dlake na polnim organima, boja', 'Pubic hair, Extent': 'Dlake na polnim organima, dužina', 'Public': 'Javno', 'Public and private transportation': 'Javni i privatni transport', 'Public assembly': 'Javni skup', 'Public Event': 'Javni događaj', 'Published on': 'Objavljeno', 'pull': 'povuci', 'pull and push': 'povuci i gurni', 'Pull tickets from external feed': 'Povucite karticu sa spoljašnjeg snabdjevanja', 'Punjabi': 'Pandžabi', 'Purchase': 'Kupovina', 'Purchase Date': 'Datum kupovine', 'Purchase Price': 'Nabavna cijena', 'Purpose': 'Namjena', 'push': 'gurni', 'Push tickets to external system': 'Guranje kartica u vanjski sistem', 'Put a choice in the box': 'Označite izbor', 'pygraphviz library not found': 'pygraphviz biblioteka nije nađena.', 'pyramidal': 'piramidalno', 'Pyroclastic Flow': 'Piroklastični tok', 'Pyroclastic Surge': 'Vulkanski pepeo', 'pyserial module not available within the running Python - this needs installing for SMS!': 'pyserial modul nije dostupan unutar tekućeg Pythona-potrebna je instalacija za SMS!', 'Python GDAL required for Shapefile support!': 'Python GDAL potreban za podršku datotekama s likovima!', 'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial modul nije dostupan kada je Python pokrenut - ovo zahtijeva instalaciju da bi se aktivirao modem', 'Qatar': 'Katar', 'quadrangular': 'četverougaoni', 'Quantity': 'Količina', 'Quantity Committed': 'Količina učinjena', 'Quantity Fulfilled': 'Količina ispunjena', "Quantity in %s's Inventory": "Količina u %s's Inventaru", "Quantity in %s's Warehouse": 'Količina u %s 
skladištu', 'Quantity in Transit': 'Količina u prelazu', 'Quantity Needed': 'Potrebna količina', 'Quantity range': 'Opseg količine', 'Quantity Received': 'Primljena količina', 'Quantity Returned': 'Vraćena količina', 'Quantity Sent': 'Poslana količina', 'Quarantine': 'Karantena', 'Queries': 'Upiti', 'Query': 'Upit', 'Query added': 'Upit dodan', 'Query deleted': 'Upit obrisan', 'Query Feature': 'Upit karakteristika', 'Query updated': 'Upit ažuriran', 'Query:': 'Upit:', 'Queryable?': 'Moguće postaviti u upit?', 'Question': 'Pitanje', 'Question Details': 'Detalji pitanja', 'Question Meta-Data': 'Metapodaci pitanja', 'Question Meta-Data added': 'Metapodaci pitanja dodani', 'Question Meta-Data deleted': 'Metapodaci pitanja obrisani', 'Question Meta-Data Details': 'Detalji metapodataka pitanja', 'Question Meta-Data updated': 'Metapodaci pitanja ažurirani', 'Question Summary': 'Rezime pitanja', 'Race': 'Rasa', 'Race group': 'Rasna grupaShift', 'Race, complexion': 'Rasa, miješana', 'Radio Callsign': 'Pozivni znak za radio', 'Radio Details': 'Radio detalji', 'Radiological Hazard': 'Radiološka opasnost', 'Radiology': 'Radiologija', 'Railway Accident': 'Željeznička nesreća', 'Railway Hijacking': 'Razbojništvo na željeznici', 'Rain Fall': 'Padanje kiše', 'RAM Cache Keys': 'RAM cache ključevi', 'Ram Cleared': 'Ram obrisan', 'Rapid Assessment': 'Brza procjena', 'Rapid Assessment added': 'Brza procjena dodana', 'Rapid Assessment deleted': 'Izbrisana brza procjena', 'Rapid Assessment Details': 'Detalji brze procjene', 'Rapid Assessment updated': 'Brza procjena ažurirana', 'Rapid Assessments': 'Brze Procjene', 'Rapid Assessments & Flexible Impact Assessments': 'Brze procjene i fleksibilne procjene utjecaja', 'Rapid Close Lead': 'Jaki i grupni grad', 'Rapid Data Entry': 'Brzi unos podataka', 'Rating': 'Rejting', 'Raw Database access': 'Direktni pristup bazi podataka', 'RC frame with masonry infill': 'RC okvir sa zidanim ispunjenjem', 'Read-Only': 'Samo za čitanje', 'Ready': 
'Spreman', 'Real World Arbitrary Units': 'Proizvoljne jedinice iz realnog svijeta', 'Reason': 'Razlog', 'Receive': 'Preuzimanje', 'Receive %(opt_in)s updates:': 'Primite %(opt_in)s nadogradnje:', 'Receive New Shipment': 'Primi novu pošiljku', 'Receive Shipment': 'Prijem pošiljke', 'Receive this shipment?': 'Primiti ovu pošiljku?', 'Receive updates': 'Primi nadogradnje', 'Receive/Incoming': 'Prijem/dolaz', 'Received': 'Primljeno', 'Received By': 'Primljeno od strane', 'Received By Person': 'Osoba primila', 'Received date': 'Primljeno datuma', 'Received Item added': 'Dodata primljena stavka', 'Received Item deleted': 'Primljeni Predmet obrisan', 'Received Item Details': 'Detalji primjene stavke', 'Received Item updated': 'Primljena stavka je ažurirana', 'Received Shipment canceled': 'Rrimljena pošiljka otkazana', 'Received Shipment canceled and items removed from Inventory': 'Isporuka otkazana i stavke uklonjene iz skladišta', 'Received Shipment Details': 'Detalji primljene pošiljke', 'Received Shipment updated': 'Ažurirana primljena stavka', 'Received Shipments': 'Primljene pošiljke', 'Received/Incoming Shipments': 'Primljene/dolazne pošiljke', 'Receiving and Sending Items': 'Primanje i slanje stavki', 'Receiving Inventory': 'Prijem u skladište', 'Reception': 'Prijem', 'Recipient': 'Primalac', 'Recipient(s)': 'Primaoc(i)', 'Recipients': 'Primaoci', 'Recommendations for Repair and Reconstruction or Demolition': 'Prijedlozi za opravak i rekonstrukciju ili rušenje', 'Record': 'Zapis', 'Record %(id)s created': 'Zapis %(id)s kreiran', 'Record %(id)s updated': 'Zapis %(id)s ažuriran', 'RECORD A': 'ZAPIS A', 'Record added': 'Zapis dodan', 'Record already exists': 'Zapis već postoji', 'Record any restriction on use or entry': 'Zabilježi bilo kakva ograničenja prilikom korištenja ili pristupa', 'Record approved': 'Zapis odobren', 'RECORD B': 'ZAPIS B', 'Record could not be approved.': 'Zapis ne može biti potvrđen', 'Record could not be deleted.': 'Zapis ne može biti 
obrisan', 'Record deleted': 'Zapis obrisan', 'Record Details': 'Detalji zapisa', 'record does not exist': 'zapis ne postoji', 'Record ID': 'Id zapisa', 'record id': 'Id zapisa', 'Record id': 'Id zapisa', 'Record last updated': 'Zapis je posljednji put izmijenjen', 'Record not found': 'Zapis nije nađen', 'Record not found!': 'Zapis nije pronađen', 'Record Saved': 'Zapis spašen', 'Record updated': 'Zapis ažuriran', 'Record Updates': 'Ažuriranja zapisa', 'Recording and Assigning Assets': 'Snimanje i dodjela sredstava', 'Records': 'Zapisi', 'records deleted': 'Zapis obrisan', 'Records merged successfully.': 'Slogovi uspješno spojeni', 'Recovery': 'Oporavak', 'Recovery report added': 'Dodat izvjestaj o pronalaženju', 'Recovery report deleted': 'Izvještaj o pronalaženju izbrisan', 'Recovery report updated': 'Izvještaj o pronalaženju ažuriran', 'Recovery Request': 'Zahtjev za Obnovom', 'Recovery Request added': 'Zahtjev za povrat dodan', 'Recovery Request deleted': 'Zahtjev za povrat obrisan', 'Recovery Request updated': 'Zahtjev za povrat ažuriran', 'Recovery Requests': 'Zahtjevi za povrat', 'rectangular': 'pravougaona', 'Recurring': 'Ponavljajući', 'Recurring Cost': 'Ponavljajući troškovi', 'Recurring cost': 'Ponavljajući troškovi', 'Recurring costs': 'Povratni troškovi', 'Recurring Request?': 'Ponavljajući zahtjev?', 'Red': 'Crveno', 'red': 'crvena', 'Red Cross / Red Crescent': 'Crveni križ/Crveni polumjesec', 'Redirect URL': 'Preusmjeri URL', 'Reference Document': 'Referentni dokument', 'refresh': 'osvježi', 'Refresh Rate (seconds)': 'Brzina osvježavanja (sekunde)', 'Region': 'Oblast', 'Region added': 'Područje dodano', 'Region deleted': 'Područje obrisano', 'Region Details': 'Detalji oblasti', 'Region Location': 'Lokacija regiona', 'Region updated': 'Područje ažurirano', 'Regional': 'Regionalan', 'Regions': 'Oblasti', 'Register': 'Registruj', 'Register As': 'Registruj kao', 'Register for Account': 'Registruj se za korisnički nalog', 'Register Person': 'Registriraj 
osobu', 'Register Person into this Camp': 'Registruj Osobu u ovaj Kamp', 'Register Person into this Shelter': 'Registruj osobu u sklonište', 'Register them as a volunteer': 'Registruj ih kao volontere', 'Registered People': 'Registrirani ljudi', 'Registered users can': 'Registrovani korisnici mogu', 'Registered users can %(login)s to access the system': 'Potrebna je %(login)s da registrovani korisnici mogu da pristupe sistemu', 'Registration': 'Registracija', 'Registration added': 'Registracija zabilježena', 'Registration Details': 'Detalji registracije', 'Registration entry deleted': 'Registracija zabilježena', 'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Prijava još čeka odobrenje od ovlaštene osobe (%s) - molimo pričekajte dok se prijava ne odobri.', 'Registration key': 'Ključ za registraciju', 'Registration not permitted': 'Registracija nije dozvoljena', 'Registration successful': 'Registracija uspješna', 'Registration updated': 'Registracija zabilježena', 'Registro de Organización': 'Registar organizacije', 'Registro de Refugios': 'Registar izbjeglica', 'Rehabilitation/Long Term Care': 'Rehabilitacija/ Briga u dužem trajanju', 'Reinforced masonry': 'Ojačano zidanje', 'Reject': 'Odbaci', 'Rejected': 'Odbijeno', 'Relationship': 'Odnos', 'Relative added': 'Dodat srodnik', 'Relative deleted': 'Obrisan srodnik', 'Relative Details': 'Povezani detalji', 'Relative updated': 'Srodnik ažuriran', 'Relatives': 'Srodnici', 'Reliable access to sanitation/hygiene items': 'Pouzdan pristup sanitarnim predmetima ', 'Relief': 'Pomoć', 'Relief Item': 'Stavka pomoći', 'Relief Item updated': 'Stavka pomoći je ažurirana', 'Relief Items': 'Predmeti pomoći', 'Relief Team': 'Tim za pomoć', 'Religion': 'Religija', 'Religious': 'Vjerski', 'Religious Leader': 'Vjerski Vođa', 'Reload': 'Ponovo učitaj', 'reload': 'ponovo učitaj', 'Relocate as instructed in the <instruction>': 'Premjesti kako je navedeno u <instruction>', 'Remarks': 
'Napomene', 'Remember Me': 'Zapamti me', 'Remote Error': 'Greška udaljenog servisa.', 'Remove': 'Ukloni', 'Remove Activity from this event': 'Ukloni Aktivnost iz ovog događaja', 'Remove all log entries': 'Ukloni stavke zapisnika', 'Remove Asset from this event': 'Sklonite sredstvo sa ovog događaja', 'Remove Asset from this incident': 'Odstrani sredstvo iz ovog incidenta', 'Remove Asset from this scenario': 'Odstrani sredstvo iz ovog scenarija', 'Remove Bookmark': 'Ukloni zabilješku', 'Remove Coalition': 'Ukloni koaliciju', 'Remove Document from this request': 'Ukloni dokument iz ovog zahtjeva', 'Remove existing data before import': 'Obriši postojeće podatke prije uvoza', 'Remove Facility from this event': 'Ukloni objekat iz ovog događaja', 'Remove Facility from this incident': 'Ukloni ovaj objekt iz ovog incidenta', 'Remove Facility from this scenario': 'Ukloni objekat iz ovog scenarija', 'Remove Feature: Select the feature you wish to remove & press the delete key': 'Ukloni karakteristiku: Izaberite karakteristiku koju želite ukloniti i pritisnite dugme za brisanje', 'Remove Human Resource from this event': 'Skloniti ljudske resurse sa ovog događaja', 'Remove Human Resource from this incident': 'Uklonite ovaj ljudski resusr sa ovog incidenta', 'Remove Human Resource from this scenario': 'Uklonite ljudske resurse iz ovog scenarija', 'Remove Incident from this event': 'Ukloni ovaj incident iz datog događaja', 'Remove Incident Report from this event': 'Ukloni izvještaj o incidentu za ovaj događaj', 'Remove Incident Report from this incident': 'Odstrani izvještaj o incidentu iz ovog incidenta', 'Remove Incident Type from this event': 'Ukloni tip incidenta za ovaj događaj', 'Remove Item from Inventory': 'Ukloni stavku iz inventara', 'Remove Layer from Profile': 'Ukloni sloj s profila', 'Remove Layer from Symbology': 'Ukloni sloj s značenja simbola', 'Remove Map Profile from this event': 'Ukloni podešavanje mape iz ovog događaja', 'Remove Map Profile from this 
incident': 'Ukloni konfiguraciju mape s ovog incidenta', 'Remove Map Profile from this scenario': 'Ukloni podsešavanje mape iz ovog scenarija', 'Remove Network': 'Ukloni mrežu', 'Remove Organization from Project': 'Ukloni organizaciju iz projekta', 'Remove People from Commitment': 'Ukloni Osobu iz Obavezivanja', 'Remove Person from Commitment': 'Ukloni osobu iz zaduženja', 'Remove Person from Group': 'Ukloni osobu iz grupe', 'Remove Person from Team': 'Odstrani osobu iz tima', 'Remove Profile Configuration for Layer': 'Ukloni konfiguraciju profila za ovaj sloj', 'Remove selection': 'Uklonite trenutni odabir', 'Remove Skill': 'Obriši vještinu', 'Remove Skill from Request': 'Ukloni vještinu iz zahtjeva', 'Remove Stock from Warehouse': 'Ukloni zalihu iz skladišta', 'Remove Symbology from Layer': 'Ukloni značenja simbola s sloja', 'Remove Tag for this Event from this Post': 'Ukloni oznaku ovog događaja za ovaj ubacivi tekst', 'Remove Task from this event': 'Izbrišite zadatak sa ovog događaja', 'Remove Task from this incident': 'Odstrani zadatak iz ovog incidenta', 'Remove Task from this scenario': 'Ukloni zadatak iz ovog scenarija', 'Remove this asset from this event': 'Ukloni ovo sredstvo iz datog događaja', 'Remove this asset from this scenario': 'Ukloni sredstvo iz ovog scenarija', 'Remove this entry': 'Ukloni ovaj unos', 'Remove this facility from this event': 'Uklonite ovaj objekat iz ovog događaja', 'Remove this facility from this scenario': 'Ukloni ovaj objekt iz ovog scenaria', 'Remove this human resource from this event': 'Uklonite ovaj ljudski resusr sa ovog dešavanja', 'Remove this human resource from this scenario': 'Izbriši ovaj ljudski resurs sa ovog scenarija', 'Remove this task from this event': 'Ukloni ovaj zadatak sa ovog događaja', 'Remove this task from this scenario': 'Ukloni ovaj zadatak sa scenarija', 'Remove Vehicle from this incident': 'Odstrani vozilo iz ovog incidenta', 'Removed from Group': 'Odstranjen iz grupe', 'Removed from Team': 
'Odstranjen iz tima', 'Reopened': 'Ponovo otvoren', 'Repacked By': 'Prepakovao', 'Repair': 'Popravi', 'Repaired': 'Popravljeno', 'Repairs': 'Popravke', 'Repeat': 'Ponovi', 'Repeat your password': 'Ponovite vašu lozinku', 'replace': 'zamijeni', 'Replace': 'Zamijeni', 'Replace All': 'Zamijeni sve', 'Replace if Master': 'Zamjeni ukoliko je Master', 'Replace if Newer': 'Zamijeni ako je novije', 'Replace with Remote': 'Zamijeni s udaljenim', 'Replace/Master': 'Zamjeni/Master', 'Replace/Newer': 'Zamijeni/novije', 'Replies': 'Odgovori', 'Reply': 'Odgovor', 'Reply Message': 'Poruka za automatski odgovor', 'Report': 'Izvještaj', 'Report a Found Person': 'Prijavi pronalazak osobe', 'Report a Missing Person': 'Prijavite nestanak osobe', 'Report a Problem with the Software': 'Prijavi problem sa softverom', 'Report added': 'Dodan izvještaj', 'Report Another Assessment...': 'Prijavite još jednu procjenu...', 'Report by Age/Gender': 'Izvještaj po starosti/spolu', 'Report deleted': 'Obrisan izvještaj', 'Report Details': 'Detalji izvještaja', 'Report my location': 'Prijavi moju lokaciju', 'Report of': 'Izvještaj za', 'Report on Annual Budgets': 'Izvještaj o godišnjem budžetu', 'Report Options': 'Opcije izvještaja', 'Report Resource': 'Prijavi resurs', 'Report that person missing': 'Prijavite nestanak osobe', 'Report the contributing factors for the current EMS status.': 'Izvjesititi o faktorima koji utiču na trenutni status hitne medicinske pomoći', 'Report the contributing factors for the current OR status.': 'Prijavi faktore koji doprinose trenutnom OR statusu.', 'Report them as found': 'Prijavi ih kao pronađene', 'Report them missing': 'Prijavite njihov nestanak', 'Report To': 'Prijavi na', 'Report Types Include': 'Tipovi izvještaja sadrže', 'Report updated': 'Ažuriran izvještaj', 'Reported By': 'Prijavio%2', 'Reported By (Not Staff)': 'Izvijestio (Nije osoblje)', 'Reported By (Staff)': 'Prijavio (osoblje)', 'Reported To': 'Prijavljeno', 'Reporter': 'Izvjestilac', 'Reporter 
Name': 'Ime izvjestioca', 'Reporter:': 'Izvjestilac:', 'Reporting on the projects in the region': 'Izvještavanje o projektima u regionu', 'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab modul nije dostupan unutar pokrenutog Pythona-potrebna je instalacija PDF izlaza!', 'ReportLab module not available within the running Python - this needs installing to do PDF Reporting!': 'Modul ReportLab nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju PDF izlaza!', 'ReportLab not installed': 'ReportLab nije instaliran', 'Reports': 'Izvještaji', 'reports successfully imported.': 'Izvještaji uspješno uvezeni', 'Repositories': 'Repozitoriji', 'Repository': 'Repozitorijum', 'Repository Base URL': 'Osnovni repozitorijski URL...', 'Repository Configuration': 'Konfiguracija repozitorija', 'Repository configuration deleted': 'Obrisana konfiguracija repozitorija', 'Repository configuration updated': 'Ažurirana konfiguracija repozitorija', 'Repository configured': 'Repozitorij konfigurisan', 'Repository Name': 'Ime repozitorija', 'Repository Type': 'Tip repozitorija', 'Repository UUID': 'UUID repozitorija', 'representation of the Polygon/Line.': 'predstavljanje Poligona/Linije', 'Request': 'Zahtjev', 'Request Added': 'Dodan Zahtjev', 'Request added': 'Zahtjev dodan', 'Request Aid': 'Zahtijevaj pomoć', 'Request Canceled': 'Zahtjev otkazan', 'Request deleted': 'Zahtjev obrisan', 'Request Detail': 'Zahtijevaj detalj', 'Request Details': 'Zatražiti detalje', 'Request for Role Upgrade': 'Zahtjev za nadogradnju uloge', 'Request From': 'Zahtjev od', 'Request from Facility': 'Zahtjev s objekta', 'Request Item': 'Zahtjev za predmetom', 'Request Item added': 'Stavka zahtjeva dodana', 'Request Item deleted': 'Traženi artikal obrisan.', 'Request Item Details': 'Zatraži detalje o predmetu', 'Request Item from Available Inventory': 'Zahtjevaj stavku iz dostupnog inventara', 'Request Item updated': 'Zahtijevana stavka je 
ažurirana', 'Request Items': 'Zatraži stavke', 'Request Job': 'Zahtijevaj posao', 'Request Management': 'Upravljanje zahtjevima', 'Request New People': 'Zatraži nove ljude', 'Request Schedule': 'Raspored zahtjeva', 'Request Status': 'Zahtjev za status', 'Request Status updated': 'Status zahtjeva ažuriran', 'Request Stock from Available Warehouse': 'Zahtijevaj zalihu iz dostupnih skladišta', 'Request Template Added': 'Predložak zahtjeva dodan', 'Request Template Deleted': 'Predložak zahtjeva obrisan', 'Request Template Details': 'Detalji predloška zahtjeva', 'Request Template Updated': 'Predložak zahtjeva ažuriran', 'Request Templates': 'Predlošci zahtjeva', 'Request Type': 'Tip zahtjeva', 'Request Updated': 'Zahtjev ažuriran', 'Request updated': 'Zahtjev ažuriran', 'Request, Response & Session': 'Zahtjev, odgovor i sesija', 'Requested': 'Zahtijevano', 'Requested By': 'Zatraženo od strane', 'Requested by': 'Zatraženo od strane', 'Requested By Facility': 'Zahtijevano po objektima', 'Requested By Location': 'Zahtjevano po lokacijama', 'Requested By Warehouse': 'Zahtjevano po skladištu', 'Requested For': 'Zahtijevano za', 'Requested For Facility': 'Zahtjevano po objektima', 'Requested for Site': 'Zahtjevano po mjestu', 'Requested For Site': 'Zahtjevano po mjestu', 'Requested From': 'Traženo Od', 'Requested From Warehouse': 'Zahtjevano iz skladišta', 'Requested Items': 'Zatraženi predmeti', 'Requested on': 'Zahtijevano na', 'Requested Skill': 'Zahtijevana vještina', 'Requested Skill Details': 'Detalji o traženoj vještini', 'Requested Skill updated': 'Zahtijevana vještina ažurirana', 'Requested Skills': 'Tražene vještine', 'Requester': 'Zahtjevaoc', 'Requests': 'Zahtjevi', 'Requests Management': 'Upravljanje zahtjevima', 'Requests Report': 'Izvještaj o zahtjevima', 'Required by other servers.': 'Zahtijevano od strane drugog servera', 'Required Skill': 'Potrebne vjestine', 'Required Skills': 'Potrebne vještine', 'Required Skills (optional)': 'Potrebne vještine (opciono)', 
'Requires login': 'Potrebna prijava', 'Requires Login': 'Potrebna prijava', 'Requires Login!': 'Potrebna prijava!', 'Rescue Ambulance': 'Spasilačka kola hitne pompoći', 'Rescue and recovery': 'Spašavanje i oporavak', 'Rescue Vehicle Tactical Assistance': 'Taktička pomoć vozila za spašavanje', 'Reset': 'Ponovno postavljanje', 'Reset form': 'Vradi formular na početak', 'Reset Password': 'Promijeni lozinku', 'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Raširenje karakteristike: Izaberite karakteristiku kojoj želite promijeniti veličinu i prevucite pridruženu tačku željenoj veličini', 'Resolve': 'Razriješi', 'Resolve Conflict': 'Razriješi konflikt', 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Link za Riješi prikazuje novi ekran koji pomaže da se riješi problem sa duplim zapisima i ažurira baza podataka', 'Resolved': 'Riješeno', 'Resource': 'Resurs', 'Resource added': 'Resurs je dodan.', 'Resource Configuration': 'Podešavanje resursa', 'Resource configuration deleted': 'Obrisana konfiguracija resursa', 'Resource configuration updated': 'Ažurirana konfiguracija resursa', 'Resource configured': 'Resurs konfigurisan', 'Resource deleted': 'Resurs je obrisan.', 'Resource Details': 'Detalji o resursima', 'Resource Inventory': 'Zalihe resursa', 'Resource Name': 'Ime resursa', 'Resource Type': 'Tip resursa', 'Resource Type added': 'Vrsta resursa dodana', 'Resource Type deleted': 'Vrsta resursa obrisana', 'Resource Type Details': 'Detalji tipa resursa', 'Resource Type updated': 'Vrsta resursa ažurirana', 'Resource Types': 'Tipovi resursa', 'Resource updated': 'Resurs je ažuriran.', 'Resources': 'Resursi', 'Respiratory Infections': 'Infekcije respiratornih puteva', 'Responded': 'Odgovoreno', 'Responder(s)': 'Odgovorili', 'Responding': 'Odgovara', 'Response': 'Odgovor', 'RESPONSE': 'ODGOVOR', 'Response deleted': 'Odgovor izbrisan', 'Response 
Details': 'Detalji o odgovoru', 'Response Summaries': 'Sumarni odgovori', 'Response Summary Added': 'Sumarni odgovor dodan', 'Response Summary Deleted': 'Sumarni odgovori obrisan', 'Response Summary Details': 'Detalji sumarnog odgovora', 'Response Summary Report': 'Izvještaj sumarnog odgovora', 'Response Summary Updated': 'Sumarni odgovor ažuriran', 'Responses': 'Odgovori', 'Restricted Access': 'Ograničen pristup', 'Restricted Use': 'Ograničena upotreba', 'Restrictions': 'Ograničenja', 'Results': 'Rezultati', 'Retail Crime': 'Kriminal u maloprodaji', 'retired': 'penzionisan', 'Retrieve Password': 'Preuzeti Lozinku', 'retry': 'pokušaj ponovo', 'Return': 'Vraćanje', 'Return to Request': 'Povratak na zahtjev', 'Returned': 'Vraćeno', 'Returned From': 'Vraćeno sa', 'Returning': 'Vraćanje', 'Revert Entry': 'Vrati unos', 'Review': 'Pregled', 'Review Incoming Shipment to Receive': 'pregled nadolazeće pošiljke', 'Review the situation on maps.': 'Pogledaj situaciju na mapi.', 'Revised Quantity': 'Količina revidirana', 'Revised Status': 'Revidirani status', 'Revised Value per Pack': 'Revidirana vrijednost po paketu', 'RFA Priorities': 'RFA Prioriteti', 'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'RFA1: Radni okvir za vladu i organizacije, institucije, politiku i donošenje odluka', 'RFA2: Knowledge, Information, Public Awareness and Education': 'RFA2: Znanje, informacije, javno obavještavanje i obrazovanje', 'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'RFA3: Analiza i procjena rizika, ranjivosti i elemenata rizika', 'RFA4: Planning for Effective Preparedness, Response and Recovery': 'RFA4: Planiranje za efektivnu pripremljenost, odgovor i obnovu', 'RFA5: Effective, Integrated and People-Focused Early Warning Systems': 'RFA5: Efektivni, integrisani i fokusirani na ljudstvo sistemi za rano upozoravanje', 'RFA6: Reduction of Underlying Risk Factors': 'RFA6: Smanjenje fundamentalnih faktora rizika', 
'Rice': 'Riža', 'Rich Text?': 'Bogat tekst?', 'right': 'desno', 'Right-to-Left': 'Sa desna na lijevo', 'Riot': 'pobuna', 'Risk': 'Rizik', 'Risk transfer': 'Prijenos rizika', 'river': 'rijeka', 'River': 'Rijeka', 'River added': 'Dodana rijeka', 'River deleted': 'Rijeka izbrisana', 'River Details': 'Detalji o rijeci', 'River updated': 'Rijeka ažurirana', 'Rivers': 'Rijeke', 'Road Accident': 'Saobraćajna nesreća', 'Road Closed': 'Zatvorena cesta', 'Road Conditions': 'Stanje putnih pravaca', 'Road Delay': 'Odgađanje puta', 'Road Hijacking': 'Razbojništvo na putu', 'Road Usage Condition': 'Stanje na cesti', 'Roads Layer': 'Sloj puteva', 'Role': 'Uloga', 'Role added': 'Uloga dodana', 'Role assigned to User': 'Dodijeljena uloga korisniku', 'Role deleted': 'Uloga obrisana', 'Role Details': 'Detalji uloga', 'Role Name': 'Ime uloge', 'Role Required': 'Potrebna uloga', 'Role updated': 'Uloga ažurirana', 'Role Updated': 'Uloga izmjenjena', 'Role-based': 'baziran na ulozi', 'Roles': 'Uloge', 'Roles currently assigned': 'Trenutno dodijeljene uloge', 'Roles of User': 'Uloge korisnika', 'Roles Permitted': 'Dopuštene uloge', 'Roles updated': 'Uloge ažurirane', 'Roll On Roll Off Berth': 'Ro-ro brod', 'Roman': 'rimski', 'Romania': 'Rumunija', 'Roof tile': 'Crijep', 'Roofs, floors (vertical load)': 'Krovovi, podovi (vertikalno opterećenje)', 'Room': 'Soba', 'Room added': 'Dodana soba', 'Room deleted': 'Obrisana soba', 'Room Details': 'Detalji sobe', 'Room updated': 'Ažurirana soba', 'Rooms': 'Sobe', 'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'Rotiranje karakteristike: Izaberite karakteristiku koju želite rotirati i prevucite pridruženu tačku da rotirate na željenoj lokaciji', 'round': 'krug', 'Row Choices (One Per Line)': 'Red odgovora (Jedan po liniji)', 'Rows in table': 'Redova u tabeli', 'Rows in Table': 'Redova u tabeli', 'Rows selected': 'Izabrani redovi', 'RPC Service URL': 'URL RPC usluge', 'RSS 
Feed': 'RSS dovod', 'RSS Post deleted': 'RSS Ubacivi tekst obrisan', 'RSS Post Details': 'Detalji RSS ubacivog teksta', 'RSS Posts': 'RSS Ubacivi tekst', 'RSS Setting deleted': 'RSS podešavanje obrisano', 'RSS Setting Details': 'Detalji RSS postavki', 'RSS Settings': 'RSS Postavke', 'RSS settings updated': 'RSS podešavanje ažurirano', 'Run every': 'Pokreni svakih', 'Run Functional Tests': 'Pokreni funkcionalne testove', 'Run Interval': 'Interval izvrštavanja', 'Run Now': 'Pokreni sada', 'Running Cost': 'Trenutni troškovi', 'Rural Tank Tactical Vehicle': 'Seoska taktička pokretna cisterna', 'Russia': 'Rusija', 'Russian': 'ruski', 'Rwanda': 'Ruanda', 'Rápido Evaluaciones': 'Brze procjene', 'sack 20kg': 'vreća 20kg', 'sack 50kg': 'vreća 50kg', 'Safe environment for vulnerable groups': 'Sigurno okruženje za ugrožene grupe', 'Safety Assessment Form': 'Obrazac procjene sigurnosti', 'Safety of children and women affected by disaster': 'Sigurnost djece i žena ugroženih zbog katastrofe', 'Safety of children and women affected by disaster?': 'Sigurnost djece i žena ugroženih zbog prirodne nepogode?', 'Sahana access granted': 'Sahana pristup odobren', 'Sahana Community Chat': 'Chat Sahana udruženja', 'Sahana Eden <= Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <= ostalo sinhronizacija (Sahana Agasti, Ushahidi, itd.)', 'Sahana Eden <=> Other': 'Sahana Eden <=> Drugi', 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> Ostalo (Sahana Agasti, Ushahidi, itd.)', 'Sahana Eden <=> Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> ostalo sinhronizacija (Sahana Agasti, Ushahidi, itd.)', 'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden', 'Sahana Eden <=> Sahana Eden sync': 'Sahana Eden <=> Sahana Eden sinhronizacija', 'Sahana Eden Disaster Management Platform': 'Sahana Eden Platforma za vođenje aktivnosti u slučaju katastrofa', 'Sahana Eden Humanitarian Management Platform': 'Sahana Eden Platforma za vođenje humanitarnih 
aktivnosti', 'Sahana Eden portable application generator': 'Generator Sahana Eden prenosive aplikacije', 'Sahana Eden Website': 'Sahana Eden Web stranica', 'Sahana FOSS Disaster Management System': 'Sahana FOSS, sistem za upravljanje u slučaju katastrofa', 'Sahana Green': 'Sahana Green', 'Sahana is a collection of web based disaster management applications that provides solutions to large-scale humanitarian coordination and collaboration in disaster situation and its aftermath. Sahana consists of several modules for following functionalities': 'Sahana je kolekcija web baziranih aplikacija za upravljanje u slučaju katastrofe koja pruža rješenje za humanitarne koordinacije i saradnju većeg obima', 'Sahana Steel': 'Sahana čelik', 'Sahana Website': 'Sahana Web stranica', 'Saint Lucia': 'Sveta Lucija', 'Saint Vincent and the Grenadines': 'Sent Vincent i Grenadin', 'Sale': 'Prodaja', 'Salted Fish': 'Zasoljena riba', 'Salvage material usable from destroyed houses': 'Preostali materijal upotrebljiv iz uništenih kuća', 'Salvage material usable from destroyed schools': 'Preostali materijal upotrebljiv iz uništenih škola', 'Sanitation': 'Sanitacija', 'Sanitation problems': 'Sanitarni problemi', 'Satellite': 'Satelit', 'Satellite Layer': 'Satelitski sloj', 'Satellite Office': 'Pridruženi ured', 'Saturday': 'Subota', 'Saudi Arabia': 'Saudijska Arabija', 'Save': 'Snimi', 'Save and add Items': 'Snimi i dodaj stavke', 'Save and add People': 'Snimi i dodaj ljude', 'Save and Continue Editing': 'Snimi i nastavi uređivanje', 'Save any Changes in the one you wish to keep': 'Snimi promjene među onim koje želite zadržati', 'Save as New Map?': 'Snimi kao novu mapu?', 'Save Changes': 'Snimi promjene', 'Save Map': 'Snimi mapu', 'Save model as...': 'Snimi model kao...', 'Save: Default Lat, Lon & Zoom for the Viewport': 'Snimi: Podrazumijevane geografska širina i dužina i promjena veličine za prostor pogleda', 'Saved': 'Snimljeno', 'Saved filters': 'Snimljeni filteri', 'Saved Filters': 
'Snimljeni filteri', 'Saved Filters...': 'Snimljeni filteri...', 'Saved Maps': 'Snimljene mape', 'Saved search added': 'Snimljeno traženje dodano', 'Saved search deleted': 'Snimljeno traženje obrisano', 'Saved search details': 'Snimljeni detalji pretrage', 'Saved search updated': 'Snimljeno traženje ažurirano', 'Saved Searches': 'Snimljene pretrage', 'Saved searches': 'Snimljene pretrage', 'Saved.': 'Spašeno.', 'Saving...': 'Snimam...', 'Scale of Results': 'Skala rezultata', 'Scanned Copy': 'Skenirana kopija', 'Scanned Forms Upload': 'Slanje skeniranih obrazaca', 'Scenario': 'Scenario', 'Scenario added': 'Scenarij dodat', 'Scenario deleted': 'Scenarij izbrisan', 'Scenario Details': 'Detalji scenarija', 'Scenario updated': 'Ažuriran scenario', 'Scenarios': 'Scenariji', 'Schedule': 'Raspored', 'Schedule synchronization jobs': 'Rasporedi poslove sinhronizacije', 'Scheduled Jobs': 'Zakazani poslovi', 'Schema': 'Shema', 'School': 'Škola', 'School activities': 'Školske aktivnosti', 'School assistance': 'Pomoć školi', 'School assistance received/expected': 'Nastavna pomoć primljena/očekivana', 'School attendance': 'Školsko prisustvo', 'School Closure': 'Zatvaranje škole', 'School Code': 'Šifra škole', 'School destroyed': 'Škola uništena', 'School District': 'Rejon škole', 'School District added': 'Dodat školski rejon', 'School District deleted': 'Školski rejon obrisan', 'School District Details': 'Detalji školskog rejona', 'School District updated': 'Ažuriran školski rejon', 'School Districts': 'Školski rejoni', 'School heavily damaged': 'Škola teško oštećena', 'School Holidays only': 'Samo školski praznici', 'School Lockdown': 'Škola je zaključana', 'School Report added': 'Dodat školski izvjestaj', 'School Report deleted': 'Izvještaj o školii izbrisan', 'School Report Details': 'Detalji izvještaja o školama', 'School Report updated': 'Izvještaj o školama ažuriran', 'School Reports': 'Školski izvještaj', 'School Safety': 'Sigurnost škole', 'School Teacher': 'Školski 
nastavnik', 'School tents received': 'Primljeni školski šatori', 'School tents, source': 'Školski šatori, izvor', 'School used for other purpose': 'Škola koja se koristi u druge svrhe', 'School/studying': 'Škola/učenje', 'Schools': 'Škole', 'Scubadiving Support Vehicle': 'Vozilo za ronioce', 'Seaport': 'Luka', 'Seaport added': 'Luka dodana', 'Seaport deleted': 'Luka obrisana', 'Seaport Details': 'Detalji luke', 'Seaport updated': 'Luka ažurirana', 'Seaports': 'Luka', 'Search': 'Potraži', 'Search & List Bins': 'Pretraga i prikaz korpi', 'Search & List Catalog': 'Pretraga i prikaz kataloga', 'Search & List Items': 'Traži i izlistaj stavke', 'Search & List Site': 'Pretraga i prikaz mjesta', 'Search & List Sub-Category': 'Traži i prikaži podkategoriju', 'Search Activities': 'Pretraži aktivnosti', 'Search Activity Report': 'Pretraga izvještaja o aktivnostima', 'Search Addresses': 'Adrese Pretrage', 'Search After Save?': 'Traži nakon snimanja', 'Search Aid Requests': 'Traži zahtjeve za pomoć', 'Search All Requested Items': 'Traži sve potrebne stavke', 'Search All Requested Skills': 'Traži sve zahtjevane vještine', 'Search Alternative Items': 'Traži alternativne predmete', 'Search and Edit Group': 'Pretraži i uredi grupu', 'Search and Edit Individual': 'Pretraži i Uredi Pojedinca', 'Search Assessment Summaries': 'Sumarno traženje sredstava', 'Search Assessments': 'Pretraži procjene', 'Search Asset Assignments': 'Traži dodjelu sredstava', 'Search Asset Log': 'Traži zapisnika o sredstvima', 'Search Assets': 'Traži sredstva', 'Search Baseline Type': 'Pretraga tipa referentne tačke', 'Search Baselines': 'Pretraži referentne tačke', 'Search Brands': 'Traži proizvođačke marke', 'Search Budgets': 'Pretraži budžete', 'Search Bundles': 'Pretraži pakete', 'Search by ID Tag': 'Pretraži po ID oznaci', 'Search by Skill Types': 'Pretraži po vrstama vještina', 'Search by skills': 'Traži po vještinama', 'Search by Skills': 'Traži po vještinama', 'Search Camp Services': 'Pretraži usluge 
kampa', 'Search Camp Types': 'Traži vrste kampova', 'Search Camps': 'Pretraži kampove', 'Search Catalog Items': 'Pretraži stavke kataloga', 'Search Catalogs': 'Pretraži kataloge', 'Search Category<>Sub-Category<>Catalog Relation': 'Traži kategorija<>Podkategorija<>kataloški odnos', 'Search Certificates': 'Traži certifikate', 'Search Certifications': 'Pretraga certifikata', 'Search Checklists': 'Pretraži kontrolnu listu', 'Search Cluster Subsectors': 'Podsektori za pretragu skupa', 'Search Clusters': 'Pretraži skupove', 'Search Commitment Items': 'Pretraži stavke zaduženja', 'Search Commitments': 'Traži zaduženja', 'Search Committed People': 'Pretraži zadužene ljude', 'Search Competencies': 'Pretraga stručnosti', 'Search Competency Ratings': 'Pretraži rejtinge kompetencija', 'Search Configs': 'Traži konfiguracije', 'Search Contact Information': 'Pretraga informacija o kontaktu', 'Search Contacts': 'Pretraži kontakte', 'Search Course Certicates': 'Pretraga certifikata kursa', 'Search Courses': 'Pretraga kurseva', 'Search Credentials': 'Traži akreditive', 'Search Criteria': 'Kriterij pretrage', 'Search Distribution Items': 'Pretraži stavke raspodjele', 'Search Distributions': 'Traži raspodjele', 'Search Documents': 'Pretraži dokumente', 'Search Donors': 'Traženje donatora', 'Search Email InBox': 'Traži E-maik dolazne poruke', 'Search Email OutBox': 'Traži SMS E-mail poruke', 'Search Entries': 'Traži stavke', 'Search Events': 'Pretraži događaje', 'Search Facilities': 'Pretraga Objekata', 'Search Feature Class': 'traži klasu karakteristika', 'Search Feature Groups': 'Traži grupe karakteristika', 'Search Feature Layers': 'Pretraga slojeva s karakteristikama', 'Search Find Report': 'Traži izvještaj za traženje', 'Search Flood Reports': 'Pretraga izvještaja o poplavi', 'Search for a commitment by Committer name, Request ID, Site or Organization.': 'Traži zaduženje po imenu zadužioca, ID zahtjeva, mjestu ili organizaciji', 'Search for a Hospital': 'Traži bolnicu', 'Search 
for a Location': 'Traži lokaciju', 'Search for a Location by name, including local names.': 'Traži Lokaciju pomoću imena, uključujući lokalne nazive.', 'Search for a Person': 'Potraga za osobom', 'Search for a Project': 'Pretraživanje projekta', 'Search for a Project by name, code, location, or description.': 'Traži projekt po imenu, šifri, lokaciji ili opisu', 'Search for a Project by name, code, or description.': 'Traži projekt po imenu, šifri ili opisu', 'Search for a Project Community by name.': 'Traži zajednicu projekta po imenu', 'Search for a Request': 'Pretraži zahtijev', 'Search for a request by Site name, Requester name or free text.': 'Traži zahtjev po imenu mjesta, zahtjevaocu ili slobodnom tekstu.', 'Search for a shipment by looking for text in any field.': 'Pretraži isporuke traženjem teksa iz bilo kog polja.', 'Search for a shipment received between these dates': 'Pretraži pošiljku primljenu između ovih datuma', 'Search for a shipment sent between these dates.': 'Pretraži pošiljku poslanu između ovih datuma', 'Search for a shipment which has an estimated delivery between these dates.': 'Traži pošiljke čija se isporuka očekuje između ovih datutma.', 'Search for a vehicle by text.': 'Pretraži vozilo po tekstu', 'Search for an asset by text.': 'Pronađi sredstva uz pomoć teksta', 'Search for an item by category.': 'Tražite predmet po kategoriji', 'Search for an item by brand.': 'Pretraga predmeta po marki', 'Search for an item by catalog.': 'Kataloška pretraga stavki', 'Search for an item by category.': 'Pretraga po kategoriji', 'Search for an item by its code, name, model and/or comment.': 'Pretraga predmeta po kodu, imenu, modelu i/ili komentaru', 'Search for an item by text.': 'Potraži stavku unosom teksta', 'Search for an item by Year of Manufacture.': 'Pretraga stavki po atributu: Godina proizvodnje', 'Search for an order by looking for text in any field.': 'Pretraži narudžbu traženjem teksta iz bilo kog polja.', 'Search for an order expected 
between these dates': 'Pretraži narudžbu očekivanu između ovih datuma', 'Search for an Organization by name or acronym': 'Potraži organizaciju po imenu ili akronimu', 'Search for an Organization by name or acronym.': 'Traži organizaciju po imenu ili akronimu', 'Search for asset by country.': 'Pretraga sredstava po državama', 'Search for asset by location.': 'Traženje sredstava po lokaciji.', 'Search for commitments available between these dates.': 'Traži obaveze dostupne između ovih datuma', 'Search for commitments made between these dates.': 'Traži obaveze načinjene između ovih datuma', 'Search for Items': 'Traži stavke', 'Search for items by donating organization.': 'Traži stavke po donatorskoj organizaciji', 'Search for items by owning organization.': 'Traži stavke po vlasničkoj organizaciji', 'Search for items with this text in the name.': 'Traži stavke s tim tekstom u imenu', 'Search for office by country.': 'Traži ured po zemlji', 'Search for office by location.': 'Pretraga ureda po lokaciji', 'Search for office by organization or branch.': 'Traži kancelariju po organizaciji ili ogranku.', 'Search for office by organization.': 'Pretraži kancelarije po organizaciji', 'Search for office by text.': 'Pretraga ureda po tekstu', 'Search for Persons': 'Traži po osobama', 'Search for requests made between these dates.': 'Traži zahtjeve napravljene između ovih datuma', 'Search for requests required between these dates.': 'Traži zahtjeve potrebne između ovih datuma', 'Search for Staff or Volunteers': 'Potraži osoblje ili volontere', 'Search for vehicle by location.': 'Traži vozilo po lokaciji', 'Search for warehouse by country.': 'Pretraga skladišta na osnovu atributa: Država', 'Search for warehouse by location.': 'Traži skladišta po lokaciji', 'Search for warehouse by organization.': 'Potraži skladište po organizaciji.', 'Search for warehouse by text.': 'Potraži skladište unosom teksta', 'Search GPS data': 'Pretraži GPS podatke', 'Search Groups': 'Traži grupe', 
'Search here for a person record in order to:': 'Pretraži ovdje za zapis osobe koje su zabilježene da bi :', "Search here for a person's record in order to:": 'Pretraži ovdje za lične zapise da bi :', 'Search Homes': 'Pretraži domaćinstva', 'Search Hospitals': 'Traži bolnice', 'Search Human Resources': 'Pretraži ljudske resurse', 'Search Identity': 'Pretraži identitet', 'Search Images': 'Pretražuj slike', 'Search Impact Type': 'Traži tip utjecaja', 'Search Impacts': 'Pretraga utjecaja', 'Search Import Files': 'Pretražite uvezene datoteke', 'Search Incident Reports': 'Pretraži izvještaje o incidentima', 'Search Incidents': 'Traži incidente', 'Search Inventory Items': 'Pretraži artikle sa popisa', 'Search Inventory items': 'Pretraži predmete inventara', 'Search Inventory Stores': 'Pretraži skladišta inventara', 'Search Item Catalog(s)': 'Pretraži katalog(e) stavki', 'Search Item Categories': 'Pretraži kategorije stavki', 'Search Item Packets': 'Pretraga paketa stavki', 'Search Item Packs': 'Pretraga paketa predmeta', 'Search Item Sub-Category(s)': 'Traži podkategoriju stavke', 'Search Items': 'Pretraži stavke', 'Search Job Roles': 'Pretraži radna mjesta', 'Search Keys': 'Ključevi pretrage', 'Search Kits': 'Traži komplete', 'Search Layers': 'Pretraga slojeva', 'Search Level': 'Nivo pretrage', 'Search Level 1 Assessments': 'Traži procjene prvog nivoa', 'Search Level 2 Assessments': 'Pretraživanje procjena level 2', 'Search location in Geonames': 'Navedite lokaciju u Geonames', 'Search Locations': 'Traži lokacije', 'Search Log Entry': 'pretraži unose iz zapisnika', 'Search Map Profiles': 'Pretraži konfiguracije mape', 'Search Markers': 'Pretraži oznake', 'Search Member': 'Potraži člana', 'Search Members': 'Traži članove', 'Search Membership': 'Pretraži članstvo', 'Search Memberships': 'Pretraži članstva', 'Search messages': 'Pretraži poruke', 'Search Metadata': 'Traži metapodatke', 'Search Missions': 'Pretraži misije', 'Search Need Type': 'Pretraga tipa zahtijeva ', 
'Search Needs': 'Traži potrebe', 'Search Notes': 'Traži bilješke', 'Search Offices': 'Tražite kancelarije', 'Search Organisations': 'Traži organizacije', 'Search Organizations': 'Pretraži organizacije', 'Search Partners': 'Traži partnere', 'Search Patients': 'Pretraga pacijenata', 'Search Peer': 'Pretraži saradnike', 'Search Peers': 'Traži saradnike', 'Search Personal Effects': 'Pretraži osobne učinke', 'Search Persons': 'Pretraži osobe', 'Search Photos': 'Pretraga fotografija', 'Search Population Statistics': 'Pretraga statistike stanovništva', 'Search Positions': 'Pretraži pozicije', 'Search Problems': 'Pretraži probleme', 'Search Projections': 'Pretraga projekcija', 'Search Projects': 'Pretraži projekte', 'Search Queries': 'Traži upite', 'Search Query': 'Traži upit', 'Search Rapid Assessments': 'Pretraži brze procjene', 'Search Received Items': 'Pretraži primljene stavke', 'Search Received Shipments': 'Pretraži primljene isporuke', 'Search Records': 'Pretraga zapisa', 'Search Recovery Reports': 'Pretraga izvještaja o pronalaženjima', 'Search Registations': 'Pretraga registracija', 'Search Registration Request': 'Potraži zahtjev za registraciju', 'Search Relatives': 'Pretraži srodnike', 'Search Report': 'Izvještaj pretrage', 'Search Reports': 'Pretraga Izvještaja', 'Search Request': 'Potražite zahtjev', 'Search Request Items': 'Pretraži zahtijevane stavke', 'Search Requested Items': 'Pretraži tražene jedinice', 'Search Requested Skills': 'Pretraga traženih sposobnosti', 'Search Requests': 'Zahtjevi za pretragu', 'Search Resources': 'Pretraži resurse', 'Search Responses': 'Pretraga odgovora', 'Search Results': 'Rezultati pretrage', 'Search Rivers': 'Traži rijeke', 'Search Roles': 'Traži uloge', 'Search Rooms': 'Traži sobe', 'Search Scenarios': 'Pretražo scenarije', 'Search School Districts': 'Pretraga rejona škole', 'Search School Reports': 'Pretraga Izvještajao školama', 'Search Sections': 'Traži sekcije', 'Search Sectors': 'Pretraži Sektore', 'Search Sent 
Email': 'Traži poslanu elektronsku poštu', 'Search Sent Items': 'Pretraži poslane stavke', 'Search Sent Shipments': 'Pretraži poslane pošiljke', 'Search Sent SMS': 'Pretraži poslane SMS', 'Search Service Profiles': 'Pretraživanje profila usluge', 'Search Settings': 'Postavke pretrage', 'Search Shelter Services': 'Traži uslugu skloništa', 'Search Shelter Types': 'Traži tipove skloništa', 'Search Shelters': 'Pretraži skloništa', 'Search Shipment<>Item Relation': 'Traži pošiljku<>Odnos predmeta', 'Search Shipped Items': 'Pretraži isporučene stavke', 'Search Site(s)': 'Traži mjesta', 'Search Skill Equivalences': 'Pretraži ekvivalenciju vještina', 'Search Skill Provisions': 'Pretraga Provizije vjestina', 'Search Skill Types': 'Pretraži tipove sposobnosti', 'Search Skills': 'Pretraži vještine', 'Search SMS InBox': 'Traži SMS dolazne poruke', 'Search SMS OutBox': 'Traži SMS odlazne poruke', 'Search Solutions': 'Traži rješenja', 'Search Sources': 'Traži izvore', 'Search Staff': 'Traži osoblje', 'Search Staff & Volunteers': 'Potraži osoblje ili volontere', 'Search Staff or Volunteer': 'Pretraži osoblje ili volontere', 'Search Staff Types': 'Traži tip osoblja', 'Search Status': 'Status pretrage', 'Search Storage Bin Type(s)': 'Traži vrste korpe za smještaj', 'Search Storage Bin(s)': 'Traži korpe za smještaj', 'Search Storage Location(s)': 'Traži lokacije o smještaju', 'Search Subscriptions': 'Pretraži Pretplatnike.', 'Search Subsectors': 'Pretražite podsektore', 'Search Support Requests': 'Pretraži zahtjeve za podršku', 'Search Tasks': 'Pretraga zadataka', 'Search Teams': 'Traži timove', 'Search Themes': 'Pretraži teme', 'Search Tickets': 'Pretraži kartice', 'Search Tracks': 'Pretraži tragove', 'Search Training Participants': 'Traži učesnike obuke', 'Search Trainings': 'Traži treninge', 'Search Twitter Tags': 'Pretraži Twitter tagove', 'Search Units': 'Pretraži jedinice', 'Search Users': 'Pretražite korisnike', 'Search Vehicle Details': 'Pretraži detalje vozila', 'Search 
Vehicles': 'Pretraga vozila', 'Search Volunteer Availability': 'Pretraži mogućnosti volontiranja', 'Search Volunteers': 'Pretraži volontere', 'Search Warehouse Items': 'Pretraži stavke skladišta', 'Search Warehouses': 'Pretraži skladišta', 'Searched?': 'Traženo?', 'Searching for different groups and individuals': 'Traženje različitih grupa i pojedinaca', 'secondary effect': 'sekundarni efekat', 'Secondary Server (Optional)': 'Sekundarni server (opcionalno)', 'seconds': 'sekundi', 'Seconds must be a number between 0 and 60': 'Sekunde moraju biti broj između 0 i 60', 'Seconds must be a number.': 'Sekunde moraju biti broj', 'Seconds must be less than 60.': 'Sekunde bi trebale biti broj manji od 60', 'Section': 'Odjeljak', 'Section deleted': 'Odjel izbrisan', 'Section Details': 'Detalji o odjelima', 'Section updated': 'Odjel ažuriran', 'Sections': 'Sekcije', 'Sections that are part of this template': 'Sekcije koje su dio ovog šablona', 'Sections that can be selected': 'Djelovi koji mogu biti odabrani', 'Sector': 'Sektor', 'Sector added': 'Sektor dodan', 'Sector added to Organization': 'Sektor dodan u organizaciju', 'Sector added to Project': 'Sektor dodan u projekt', 'Sector added to Theme': 'Sekttor dodan u temu', 'Sector deleted': 'Sektor obrisan', 'Sector Details': 'Detalji o sektoru', 'Sector removed from Organization': 'Sektor uklonjen iz organizacije', 'Sector removed from Project': 'Sektor uklonjen sa projekta', 'Sector removed from Theme': 'Sektor uklonjen iz teme', 'Sector updated': 'Sektor ažuriran', 'Sector(s)': 'Sektor(i)', 'Sectors': 'Sektori', 'Sectors to which this Activity Type can apply': 'Sektori na koje je ova vrsta aktivnosti primjenjiva', 'Sectors to which this Theme can apply': 'Sektori na koje je ova vrsta tema primjenjiva', 'Secure Storage Capacity': 'Sigurni kapacitet smještaja', 'Security': 'Sigurnost', 'Security Description': 'Sigurnosni opis', 'Security Policy': 'Politika sigurnosti', 'Security problems': 'Sigurnosni problemi', 'Security 
Required': 'Potrebna sigurnost', 'Security Status': 'Sigurnosni status', 'See a detailed description of the module on the Sahana Eden wiki': 'Vidi detaljan opis modula na Sahana Eden wiki', 'See all': 'Vidi sve', 'See All Entries': 'Pogledajte sve unose', 'see comment': 'prikaži komentar', 'see more': 'vidi više', 'See the universally unique identifier (UUID) of this repository': 'Postavi univerzalno jedinstveni identifikator (UUID) za ovaj repozitorij', 'See unassigned recovery requests': 'Pregledaj neraspoređene zahtjeve za oporavak', 'Seen': 'Viđeno', 'Select': 'Izaberi', 'Select %(location)s': '%(location)s za izbor', "Select 2 records from this list, then click 'Merge'.": "Odaberite 2 sloga iz liste i kliknite 'Spoji0", 'Select a label question and at least one numeric question to display the chart.': 'Odabertite pitanje oznake i barem jedno brojčano pitanje za prikaz dijagrama.', 'Select a location': 'Izaberite mjesto', "Select a manager for status 'assigned'": "Odaberi menadžera za status 'dodijeljeno'", "Select a person in charge for status 'assigned'": "Odaberi osobu zaduženu za status 'dodijeljeno'", 'Select a question from the list': 'Označite pitanje sa liste', 'Select a range for the number of total beds': 'Označite opseg za ukupan broj kreveta', "Select a Room from the list or click 'Add Room'": 'Izaberite sobu sa spiska ili pritisnite "Dodaj sobu"', "Select a Room from the list or click 'Create Room'": 'Izaberite sobu sa spiska ili kliknite "Kreiraj sobu"', 'Select all': 'Izaberi sve', 'Select All': 'Izaberi sve', 'Select all templates (All modules included)': 'Odaberi sve predloške (Svi moduli uključeni)', 'Select all that apply': 'Označi sve što se odnosi na to', 'Select an existing bin': 'Odaberi postojeću korpu', 'Select an image to upload. You can crop this later by opening this record.': 'Odaberite sliku za postavljanje. 
Možete je izreati kasnije za otvaranje ovog zapisa.', 'Select an Organisation to see a list of offices': 'Izaberi organizaciju za prikaz liste kancelarija', 'Select an Organization to see a list of offices': 'Izaberi organizaciju za prikaz liste kancelarija', 'Select Existing Location': 'Izaberi postojeću lokaciju', 'Select from registry': 'Izaberi iz registra', 'Select Items from the Request': 'Izaberite željene stavke', 'Select Items from this Inventory': 'Odaberite stavke iz ovog inventara', 'Select Label Question': 'Izaberi pitanje oznake', 'Select language code': 'Izaberi oznaku jezika', 'Select Modules for translation': 'Odaberite module za prevođenje', 'Select Modules which are to be translated': 'Odaberite module koji se trebaju prevesti', 'Select Numeric Questions (one or more):': 'Odaberite numerička pitanja (jedno ili više):', 'Select one or more option(s) that apply': 'Odaberite jednu ili dvije primjenjive opcije', 'Select Photos': 'Izaberi fotografije', 'Select resources to import': 'Izaberite resurse za uvesti', 'Select Skills from the Request': 'Izaberi vještine iz Zahtjeva', 'Select Stock from this Warehouse': 'Odaberite zalihu iz ovog skladišta', 'Select the default site.': 'Izaberi podrazumijevano mjesto', 'Select the language file': 'Izaberi jezičku datoteku', 'Select the option that applies': 'Odaberite primjenjivu opciju', 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Izaberite preklapanje za procjene i aktivnosti koje se odnose na svaku potrebu da se identifikuje propust.', 'Select the person assigned to this role for this project.': 'Odaberi osobu dodijeljenu za ovu ulogu za ovaj projekt', 'Select the person associated with this scenario.': 'Odaberi osobu dodijeljenu za ovaj scenario', 'Select the required modules': 'Izaberite potrebne module', "Select this if all specific locations need a parent at the deepest level of the location hierarchy. 
For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'Odaberite ovo ako sve određene lokacije trebaju roditelja na najdubljem nivou lokacijske hijerarhije. Na primjer, ako je "distrikt" najmanja podjela u hijerarhiji,to znači da sve određene lokacije moraju imati distrikt kao roditelja.', "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "Odaberite ovo ako sve specifične lokacije trebaju imati nadređenu lokaciju usljed hijerarhije. Ovo može pomoći u postavljanju 'regije' koja predstavlja pogođenu oblast", 'Select this if you need this resource to be mapped from site_id instead of location_id.': 'Odaberite ovo ako želite da se resurs mapira prema oznaci mjesta site_id umjesto lokacije location_id.', 'Select This Location': 'Odaberi ovu lokaciju', 'Select to show this configuration in the menu.': 'Odaberi prikaz ove konfiguracije u meniju', 'Select to show this configuration in the Regions menu.': 'Izaberite da vam se prikaže ova konfiguracija u meniju Regije', 'selected': 'odabran', 'Selected Jobs': 'Izabrani poslovi', 'Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form.': 'Odabrani OCR nema stranice. 
Koristite drugu reviziju da kreirate novu reviziju preuzimanjem novog formulara.', 'Selected Questions for all Completed Assessment Forms': 'Odabrana pitanja za sve ispunjene formulare procjene', 'Selects what type of gateway to use for outbound SMS': 'Izabira tip mrežnog izlaza za izlazni SMS', 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Bira da li koristiti modem, tropo ili neki drugi način za slanje SMS', 'Selects whether to use the gateway or the Modem for sending out SMS': 'Bira da li koristiti mrežni izlaz ili modem za slanje SMS', 'Self Registration': 'Samoregistracija', 'Self-care': 'Vlasita briga', 'Self-registration': 'Samoregistracija', 'Send': 'Pošalji', 'Send & Receive Email messages (e.g. for alerting)': 'Pošalji i primi Email poruke (npr. za alarmiranje)', 'Send & Receive SMS messages (e.g. for alerting)': 'Pošalji i primi SMS poruke (npr. za alarmiranje)', 'Send a message to this person': 'Pošalji poruku ovoj osobi', 'Send a message to this team': 'Pošalji poruku ovom timu', 'Send Alerts using Email &/or SMS': 'Poslati upozorenje koristeći Email i/ili SMS', 'Send batch': 'Paketno slanje', 'Send Commitment as Shipment': 'Pošalji zaduženje kao pošiljku', 'Send Dispatch Update': 'Pošalji ažuriranje raspodjele', 'Send Email': 'Pošalji e-mail', 'Send from %s': 'Pošalji sa %s ', 'Send Message': 'Šalji poruku', 'Send message': 'Pošalji poruku', 'Send new message': 'Pošalji novu poruku', 'Send New Shipment': 'Pošalji novu pošiljku', 'Send Notification': 'Pošalji obavijest', 'Send Shipment': 'Slanje pošiljke', 'Send SMS': 'Pošalji SMS', 'Send Task Notification': 'Obavijest o slanju zadatka', 'Sender': 'Pošiljalac', 'Sender deleted': 'Pošiljalac obrisan', 'Sender Priority updated': 'Prioritet pošiljaoca ažuriran', 'Sender Whitelisted': 'Pošiljaoc na bijeloj listi', 'Sends & Receives Alerts via Email & SMS': 'Šalje i prima upozorenja putem emaila i SMS-a', 'Senior (50+)': 'Stariji (50+)', 'Sensitivity': 'Osjetljivost', 
'Sent': 'Poslano', 'Sent By': 'Poslano od', 'Sent By Person': 'Poslano putem osobe', 'Sent date': 'Poslano dana', 'Sent Emails': 'Poslana elektronska pošta', 'Sent Item deleted': 'Poslana stavka obrisan', 'Sent Item Details': 'Detalji poslanog predmeta', 'Sent Item updated': 'Poslana stavka ažurirana', 'Sent Shipment canceled': 'Poslana Pošiljka otkazana', 'Sent Shipment canceled and items returned to Inventory': 'Poslana pošiljka otkazana i predmeti vraćeni u inventar', 'Sent Shipment canceled and items returned to Warehouse': 'Poslana pošiljka otkazana i predmeti vraćeni u skladište', 'Sent Shipment Details': 'Detalji poslate pošiljke', 'Sent Shipment has returned, indicate how many items will be returned to Warehouse.': 'Poslana pošiljka je vraćena, navedite koliko će se stavki vratiti u skladište', 'Sent Shipment updated': 'Poslana pošiljka ažurirana', 'Sent Shipments': 'Poslate pošiljke', 'Sent SMS': 'Pošalji SMS', 'Sent Tweets': 'Pošalji Tweets', 'Separate latrines for women and men': 'Odvojeni zahodi za muškarce i žene', 'separated': 'razdvojeni', 'Separated children, caregiving arrangements': 'Djeca odvojena od roditelja, raspored skrbnika', 'separated from family': 'Odvojen/a od porodice', 'Seraiki': 'Seraiki', 'Serbia': 'Srbija', 'Serial Number': 'Serijski broj', 'Series': 'Serije', 'Series added': 'Serija dodana', 'Series deleted': 'Serija obrisana', 'Series Details': 'Detalji serije', 'Series details missing': 'Nedostaju detalji serije', 'Series updated': 'Serija ažurirana', 'Server': 'Server', 'Service': 'Usluga', 'Service added': 'Usluga dodana', 'Service added to Organization': 'Usluga dodana organizaciji', 'Service Catalogue': 'Katalog usluga', 'Service deleted': 'Usluga obrisana', 'Service Details': 'Detalji usluge', 'Service Due': 'Rok usluge', 'Service or Facility': 'Usluga ili objekat', 'Service profile added': 'Dodat profil usluge', 'Service profile deleted': 'Obrisan profil usluge', 'Service profile updated': 'Ažuriran profil usluge', 'Service 
Record': 'Zapis usluge', 'Service removed from Organization': 'Usluga uklonjena iz organizacije', 'Service updated': 'Usluga ažurirana', 'Services': 'Usluge', 'Services Available': 'Dostupne usluge', 'Set as default Site': 'Postavi kao podrazumijevano mjesto', 'Set as my Default': 'Postavi kao moje podrazumijevano', 'Set Base Facility/Site': 'Postavljeno mjesto/objekt baze', 'Set Base Site': 'Postavi osnovnu lokaciju', 'Set By': 'Postavi prema', 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Postaviti na True da se dozvoli uređivanje ovog nivoa hierarhije lokacija korisnicima koji nisu MapAdmin.', 'Setting added': 'Podešavanje dodano', 'Setting deleted': 'Podešavanje obrisano', 'Setting Details': 'Detalji postavke', 'Setting updated': 'Postavka ažurirana', 'Settings': 'Postavke', 'Settings updated': 'Podešavanja ažurirana', 'Settings were reset because authenticating with Twitter failed': 'Postavke su obrisane zbog neuspjele autentifikacije sa Twitterom', 'Settings which can be configured through the web interface are available here.': 'Postavke, koje je moguće konfigurisati putem web sučelja, su dostupne ovdje.', 'Severe': 'Strogo', 'Severity': 'Ozbiljnost', 'Severity:': 'Ozbiljnost:', 'Sex': 'Spol', 'Seychelles': 'Sejšeli', 'shallow': 'plitak', 'Shapefile Layer': 'Sloj datoteke s likovima', 'Share': 'Dijeli', 'Share a common Marker (unless over-ridden at the Feature level)': 'Podijeli zajednički marker (osim ukoliko nije zamijenjeno na nivou karakteristika)', 'shaved': 'obrijano', 'Shelter': 'Sklonište', 'Shelter & Essential NFIs': 'Sklonište & osnovni neprehrambeni artikli', 'Shelter added': 'Sklonište dodato', 'Shelter deleted': 'Sklonište obrisano', 'Shelter Details': 'Detalji o skloništu', 'Shelter Manager': 'Menadžer skloništa', 'Shelter Name': 'Naziv sklonista', 'Shelter Registry': 'Registar skloništa', 'Shelter Service': 'Usluge u skloništu', 'Shelter Service added': 'Usluga skloništa dodana', 'Shelter 
Service deleted': 'Usluga skloništa obrisana', 'Shelter Service Details': 'Detalji usluga skloništa', 'Shelter Service updated': 'Usluga skloništa ažurirana', 'Shelter Services': 'Usluga skloništa', 'Shelter Settings': 'Postavke skloništa', 'Shelter Status': 'Status skloništa', 'Shelter Status added': 'Status skloništa dodan', 'Shelter Status deleted': 'Status skloništa obrisan', 'Shelter Status Details': 'Detalji statusa skloništa', 'Shelter Status updated': 'Status skloništa ažuriran', 'Shelter Statuses': 'Statusi skloništa', 'Shelter Type': 'Tip skloništa', 'Shelter Type added': 'Tip skloništa dodan', 'Shelter Type deleted': 'Tip skloništa obrisan', 'Shelter Type Details': 'Detalji o tipu skloništa', 'Shelter Type updated': 'Tip skloništa ažuriran', 'Shelter Types': 'Tipovi skloništa', 'Shelter Types and Services': 'Vrste i usluge skloništa', 'Shelter updated': 'Sklonište ažurirano', 'Shelter/NFI Assistance': 'Sklonište/Pomoć u neprehrambenim artiklima', 'Shelter/NFI assistance received/expected': 'Sklonište/Pomoć u neprehrambenim artiklima/očekivano', 'Shelters': 'Skloništa', 'shift_start': 'pomjeranje_početka', 'Shipment': 'Pošiljka', 'Shipment Created': 'Pošiljka napravljena', 'Shipment Details': 'Detalji pošiljke', 'Shipment Item deleted': 'Predmet dostave obrisan', 'Shipment Item Details': 'Detalji stavki pošiljke', 'Shipment Item updated': 'Stavka dostave ažurirana', 'Shipment Items': 'Stavke pošiljke', 'Shipment Items Received': 'Primljene stavke pošiljke', 'Shipment Items received by Inventory': 'Pošiljke primljene u skladište', 'Shipment Items sent from Inventory': 'Isporuke stavki poslatih iz inventara', 'Shipment Items sent from Warehouse': 'Isporuke stvaki poslatih iz skladišta', 'Shipment received': 'Primljena pošiljka', 'Shipment to Receive': 'Pošiljka za prijem', 'Shipment to Send': 'Pošiljka za poslati', 'Shipment Type': 'Tip pošiljke', 'Shipment/Way Bills deleted': 'Dostavnica/putni nalog obrisana', 'Shipments': 'Pošiljke', 'Shipments To': 
'Pošiljka do', 'Shipping Organization': 'Organizacija dostave', 'Shooting': 'Pucnjava', 'short': 'kratki', 'Short Assessment': 'Kratka procjena', 'Short Description': 'Kratak Opis', 'Short Description:': 'Kratak opis:', 'Short Text': 'Kratki tekst', 'Short Title / ID': 'Kratki naslov / ID', 'short<6cm': 'kratak<6cm', 'Show': 'Prikaži', 'Show %(number)s entries': 'Prikaži %(number)s članova', 'Show author picture?': 'Prikaži sliku autora', 'Show Checklist': 'Prikaži kontrolnu listu', 'Show Details': 'Prikaži detalje', 'Show in Menu?': 'Prikazati u Meniju ?', 'Show Location?': 'Prikaži lokaciju?', 'Show Map': 'Prikaži kartu', 'Show on map': 'Prikaži na mapi', 'Show on Map': 'Prikaži na karti', 'Show Region in Menu?': 'Pokaži regiju u meniju?', 'Show Table': 'Prikaži tabelu', 'Show totals': 'Prikaži sumarne kolone', 'Showing 0 to 0 of 0 entries': 'Prikaz 0 do 0 od 0 unosa', 'Showing _START_ to _END_ of _TOTAL_ entries': 'Prikazujem _START_ do _END_ od _TOTAL_ zapisa', 'Showing latest entries first': 'Zadnji unosi se prvi prikazuju', 'sides': 'strane', 'Sierra Leone': 'Sierra Leone', 'Sign-up as a volunteer': 'Prijavi se kao volonter', 'Sign-up for Account': 'Prijavi se za Račun', 'sign-up now': 'Prijavi se sada', 'Sign-up succesful - you should hear from us soon!': 'Prijava uspješna, uskoro ćemo Vam se javiti!', 'Signature': 'Potpis', 'Signature / Stamp': 'Potpis / Pečat', 'simple': 'jednostavan', 'Sindhi': 'Sindi', 'Singapore': 'Singapur', 'single': 'samac', 'Single PDF File': 'Jedan PDF dokument', 'Site': 'Lokacija', 'Site Administration': 'Administracija lokacije', 'Site Contact': 'Kontakt mjesta', 'Site ID': 'ID mjesta', 'Site Key': 'Ključ mjesta', 'Site Key which this site uses to authenticate at the remote site (if required for this type of repository).': 'Ključ sajta kojim se on prijavljuje na udaljeni sajt (ako je potrebno za ovu vrstu repozitorija).', 'Site Location Description': 'Opis lokacije mjesta', 'Site Location Name': 'Naziv lokacije mjesta', 'Site 
Manager': 'Menadžer mjesta', 'Site Name': 'Ime mjesta', 'Site Needs': 'Potrebe mjesta', 'Site Needs added': 'Potrebe mjesta dodane', 'Site Needs deleted': 'Potrebe mjesta obrisane', 'Site Needs updated': 'Potrebe mjesta ažurirane', 'Site/Warehouse': 'Mjesto/Skladište', 'Sites': 'Mjesta', 'SITUATION': 'SITUACIJA', 'Situation': 'Situacija', 'Situation Awareness': 'Svjesnost Situacije', 'Situation Awareness & Geospatial Analysis': 'Svjesnost Situacije & Geoprostorna Analiza', 'Situation Map': 'Mapa situacije', 'Size of cache:': 'Veličina keša:', 'Skeleton Example': 'Primjer skeleta', 'Sketch': 'Nacrt', 'Skill': 'Vještina', 'Skill added': 'Vještina dodana', 'Skill added to Request': 'Vještina dodana zahtjevu', 'Skill Catalog': 'Katalog vještina', 'Skill deleted': 'Vještina obrisana', 'Skill Details': 'Detalji VJEŠTINE', 'Skill Equivalence': 'Ekvivalencija vještine', 'Skill Equivalence added': 'Dodata ekvivalencija vještine', 'Skill Equivalence deleted': 'Obrisana ekvivalencija vještine', 'Skill Equivalence Details': 'Detalji ekvivalentnosti vještina', 'Skill Equivalence updated': 'Ažurirana ekvivalencija vještine', 'Skill Equivalences': 'Ekvivalencije vještina', 'Skill Provision': 'Pružanje vještina', 'Skill Provision added': 'Pružanje vještine dodato', 'Skill Provision Catalog': 'Katalog pribavljanja vještina', 'Skill Provision deleted': 'Pružanje vještina obrisano', 'Skill Provision Details': 'Detalji o pružanju vještina', 'Skill Provision updated': 'Provizija vjestina ažurirana', 'Skill Provisions': 'Odredba Vještina', 'Skill removed': 'Uklonjena vještina', 'Skill removed from Request': 'Uklonjena vještina iz Zahtjeva', 'Skill Status': 'Status vještina', 'Skill TYpe': 'Vrsta vještine', 'Skill Type': 'Tip sposobnosti', 'Skill Type added': 'Dodan tip vještine', 'Skill Type Catalog': 'Katalog vrsta vještina', 'Skill Type deleted': 'Obrisan tip vještine', 'Skill Type Details': 'Detalji tipa sposobnosti', 'Skill Type updated': 'Ažuriran tip vještine', 'Skill Types': 
'Tipovi vještina', 'Skill updated': 'Vještina ažurirana', 'Skills': 'Vještine', 'Skills Catalog': 'Katalog vještina', 'Skills Management': 'Upravljanje vještinama', 'Skin Marks': 'Oznake na koži', 'slight': 'pomalo', 'slim': 'vitak', 'Slope failure, debris': 'Propast padine , krhorine', 'Slovakia': 'Slovačka', 'Slovenia': 'Slovenija', 'small': 'mali', 'Small scale mitigation': 'Smanjenje u malom stepenu', 'Small Trade': 'Mala trgovina', 'Smoke': 'Dim', 'Smoking habits': 'Pušačke navike', 'SMS added': 'SMS dodan', 'SMS deleted': 'SMS obrisan', 'SMS Details': 'Detalji o SMS', 'SMS Gateway Settings': 'Postavke SMS izlaza', 'SMS InBox': 'SMS dolazne poruke', 'SMS Modems (Inbound & Outbound)': 'SMS Modemi (ulazni i izlazni)', 'SMS Outbound': 'SMS van granica', 'SMS Outbound Gateway': 'SMS izlaz', 'SMS Outbound Gateway updated': 'SMS mrežni izlaz ažuriran', 'SMS OutBox': 'SMS odlazne poruke', 'SMS Settings': 'SMS Postavke', 'SMS settings updated': 'Postavke SMS-a su ažurirane', 'SMS updated': 'SMS ažuriran', 'SMS via SMTP (Outbound)': 'SMS preko SMTP (izlazni)', 'SMS WebAPI (Outbound)': 'SMS WebAPI (izlazni)', 'SMTP to SMS settings updated': 'SMTP u SMS postavke ažurirane', 'Snapshot': 'Snimak stanja', 'Snapshot Report': 'Kratko izvješće', 'Snow Fall': 'Sniježne padavine', 'Snow Squall': 'Snježna oluja', 'Social': 'Društveno', 'Soil bulging, liquefaction': 'Ispupčenje tla, rastapanje', 'Soliciting Cash Donations?': 'Iznuđene donacije u gotovini?', 'Solicitudes': 'Zabrinutost', 'Solid waste': 'Kruti otpad', 'Solution': 'Rješenje', 'Solution added': 'Rješenje dodano', 'Solution deleted': 'Rješenje izbrisano', 'Solution Details': 'Detalji rješenja', 'Solution Item': 'Stavka rješenja', 'Solution updated': 'Rješenja izmjenjena', 'Solutions': 'Rješenja', 'Somalia': 'Somalija', 'Some': 'Neki', 'Sorry - the server has a problem, please try again later.': 'Izvinjavamo se - problem sa serverom, molimo pokušajte kasnije.', 'Sorry location %(location)s appears to be outside the area 
of parent %(parent)s.': 'Žao nam je, ali izgleda da se lokacija %(location)s nalazi izvan oblasti roditelja %(parent)s..', 'Sorry location %(location)s appears to be outside the area supported by this deployment.': 'Izvinite ali lokacija %(location)s je izvan područja koje je podržano ovom instalacijom.', 'Sorry location appears to be outside the area of parent %(parent)s.': 'Žao nam je, ali izgleda da se lokacija nalazi izvan oblasti roditelja %(parent)s..', 'Sorry location appears to be outside the area supported by this deployment.': 'Izvinite ali ta lokacija je izvan područja koje je podržano ovom instalacijom', 'Sorry that location appears to be outside the area of the Parent.': 'Žao nam je, ali izgleda da se lokacija nalazi izvan oblasti roditelja.', 'Sorry that location appears to be outside the area supported by this deployment.': 'Nažalost ali ta lokacija je izvan područja koje je podržano ovim sistemom.', 'Sorry, I could not understand your request': 'Oprostite, ne mogu razumjeti vaš zahtjev.', 'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Nažalost samo korisnicima sa MapAdmin ulogom je dozvoljeno kreiranje grupa lokacija.', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Žao nam je, samo korisnici sa ulogom administratora mape imaju dozvolu da uređuju ove lokacije', 'Sorry, something went wrong.': 'Žao nam je, dogodila se greška.', 'Sorry, that page is forbidden for some reason.': 'Nažalost, ta stranica je zabranjena iz nekog razloga.', 'Sorry, that service is temporary unavailable.': 'Žao nam je, ova usluga je trenutno nedostupna.', 'Sorry, there are no addresses to display': 'Izvinite, ne postoje adrese za prikaz', "Sorry, things didn't get done on time.": 'Žao nam je, stvari nisu završene na vrijeme', "Sorry, we couldn't find that page.": 'Izvinite, ta stranica nije pronađena.', 'Source': 'Izvor', 'source': 'izvor', 'Source deleted': 'Izvor izbrisan', 'Source ID': 'Identifikacija 
izvora', 'Source Link': 'Izvorna veza', 'Source Name': 'Ime izvora ', 'Source of Information': 'Izvor informacije', 'Source Time': 'Izvorno vrijeme', 'Source updated': 'Izvor ažuriran.', 'Source URL': 'URL izvora', 'Sources': 'Izvori', 'Sources of income': 'Izvori prihoda', 'South Africa': 'Južna Afrika', 'South Ossetia': 'Južna Osetija', 'Space Debris': 'Svemirski otpad', 'Spain': 'Španija', 'Spanish': 'Španski', 'Special Ice': 'Specijalni led', 'Special Marine': 'Posebna mornarica', 'Special Multirisk Protection Vehicle': 'Specijalna varijabla za zaštitu od višestrukog rizika', 'Special needs': 'Specijalne potrebe', 'Specialized Hospital': 'Specijalizovana Bolnica', 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Tačno mjesto (npr. zgrada / soba) u okviru lokacije na kojem je ta osoba/grupa viđena.', 'Specific locations need to have a parent of level': 'Specifične lokacije moraju imati roditelja nivoa', 'Specific Operations Vehicle': 'Vozilo za specifične poslove', 'specify': 'precizirati', 'Specify a descriptive title for the image.': 'Specificiraj opisni naslov za sliku', 'Specify the bed type of this unit.': 'Specifikuj tip kreveta za ovu jedinicu', 'Specify the minimum sustainability in weeks or days.': 'Navedi minimalnu održivosr u sedmicama ili danima.', 'Specify the number of available sets': 'Specificiraj broj raspoloživih setova', 'Specify the number of available units (adult doses)': 'Specificiraj broj dostupnih jedinica (odrasle doze)', 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specificirajte broj dostupnih jedinica (litara) Ringer-Laktata ili ekvivalentnih rastvora', 'Specify the number of sets needed per 24h': 'Specificirati broj skupova potrebnih za 24h', 'Specify the number of units (adult doses) needed per 24h': 'Specificiraj broj jedinica (doza za odrasle) potrebnih u 24 sata', 'Specify the number of units (litres) of Ringer-Lactate or equivalent 
solutions needed per 24h': 'Naznačite broj jedinica (litara) Ringer-laktata ili ekvivalentnih rastvora potrebnih za 24h', 'Speed': 'Brzina', 'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'Sferni Mercator (900913) je potreban za upotrebu OpenStreetMap/Google/Bing baznih slojeva.', 'Spherical Mercator?': 'Sferna Merkatorova?', 'Spreadsheet': 'Tabela (spreadsheet)', 'Spreadsheet Importer': 'Uvoznik tabelarnog prikaza', 'Spreadsheet uploaded': 'Učitan tabelarni prikaz', 'Spring': 'Proljeće', 'Squall': 'Udar vjetra', 'squint-eyed': 'razrok', 'Sri Lanka': 'Šri Lanka', 'Staff': 'Osoblje', 'staff': 'osoblje', 'Staff & Volunteers': 'Osoblje i volonteri', 'Staff & Volunteers (Combined)': 'Osoblje i volonteri (kombinovano)', 'Staff 2': 'Osoblje 2', 'Staff added': 'Član osoblja dodan', 'Staff and Volunteers': 'Osoblje i volonteri', 'Staff Assigned': 'Dodijeljeno osoblje', 'Staff Assignment Details': 'Detalji o dodjeli osoblja', 'Staff Assignment removed': 'Obrisana dodjela osoblja', 'Staff Assignment updated': 'Dodjela osoblja ažurirana', 'Staff Assignments': 'Dodjele osoblja', 'Staff deleted': 'Osoblje obrisano', 'Staff ID': 'ID osoblja', 'Staff Management': 'Upravljanje osobljem', 'Staff member added': 'Član osoblja dodan', 'Staff Member added': 'Član osoblja dodan', 'Staff Member deleted': 'Član osoblja obrisan', 'Staff Member Details': 'Detalji o članovima osoblja', 'Staff Member Details updated': 'Detalji Član osoblja ažuriran', 'Staff Members': 'Članovi osoblja', 'staff members': 'članovi osoblja', 'Staff present and caring for residents': 'Osoblje je prisutno i brine za stanovnike', 'Staff Record': 'Zapis o osoblju', 'Staff Report': 'Izvještaj o osoblju', 'Staff Type added': 'Tip osoblja dodan', 'Staff Type deleted': 'Tip osoblja obrisan', 'Staff Type Details': 'Detaljii o osoblju', 'Staff Type updated': 'Tip osoblja ažuriran', 'Staff Types': 'Vrste osoblja', 'Staff updated': 'Osoblje ažurirano', 'Staff with Contracts Expiring in 
the next Month': 'Osoblje čiji ugovor ističe sljedećeg mjeseca', 'Staff/Volunteer': 'Osoblje/volonteri', 'Staff/Volunteer Record': 'Zapis o osoblju/volonterima', 'Staff2': 'Osoblje2', 'Staffing': 'Zapošljavanje', 'Staffing Level': 'Nivo osoblja', 'Stairs': 'Stepenice', 'Start Date': 'Datum početka', 'Start date': 'Datum početka', 'Start of Period': 'početak perioda', 'state': 'stanje', 'State': 'Država', 'State / Province': 'Entitet / Savezna država', 'state location': 'položaj države', 'Stationery': 'Školski pribor', 'Statistics': 'Statistika', 'Status': 'Status', "Status 'assigned' requires the %(fieldname)s to not be blank": "Status 'dodijeljen' zahtijeva da %(fieldname)s nije prazno", 'Status added': 'Dodat status', 'Status deleted': 'Obrisan status', 'Status Details': 'Detalji statusa', 'Status of clinical operation of the facility.': 'Status kliničkog rada objekta.', 'Status of general operation of the facility.': 'Status generalnih operacija objekata', 'Status of morgue capacity.': 'Status kapaciteta mrtvačnice', 'Status of operations of the emergency department of this hospital.': 'Operativni status hitnog odjela ove bolnice', 'Status of operations/availability of emergency medical services at this facility.': 'Status operacija/dostupnosti hitnih medicinskih usluga na ovom objektu.', 'Status of security procedures/access restrictions for the facility.': 'Status sigurnosnih procedura/ograničenja pristupa za ovaj objekat.', 'Status of security procedures/access restrictions in the hospital.': 'Status sigurnosnih procedura/zabrane pristupa u bolnicama.', 'Status of the clinical departments.': 'Status kliničkih odjela', 'Status of the facility.': 'Stanje objekta', 'Status of the operating rooms of this facility.': 'Status radnih prostorija na ovom objektu', 'Status of the operating rooms of this hospital.': 'Status operacionih sala u ovoj bolnici.', 'Status Report': 'Izvještaj o stanju', 'Status Report added': 'Statusni izvještaj dodan', 'Status Report 
deleted': 'Statusni izvještaj obrisan', 'Status Report updated': 'Statusni izvještaj ažuriran', 'Status Updated': 'Status ažuriran', 'Status updated': 'Ažuriran status', 'Statuses': 'Statusi', 'Steel frame': 'Čelični okvir', 'Stock': 'Zaliha', 'Stock added to Warehouse': 'Zaliha dodana u skladište', 'Stock Adjustment': 'Prilagođenje zalihe', 'Stock Adjustment Details': 'Prilagođenje zalihe', 'Stock Adjustments': 'Detalji prilagođenja zaliha', 'Stock Count created': 'Broj zaliha kreiran', 'Stock Count deleted': 'Količina zalihe obrisana', 'Stock Count Details': 'Detalji o broju zaliha', 'Stock Count modified': 'Broj zaliha izmijenjen', 'Stock Counts': 'Količine zaliha', 'Stock Expires %(date)s': 'Zaliha ističe %(date)s', 'Stock in Warehouse': 'Zaliha u skladištu', 'Stock removed from Warehouse': 'Zaliha uklonjena iz skladišta', 'Stolen': 'Ukradeno', 'Storage Bin': 'Korpa za čuvanje', 'Storage Bin added': 'Dodana korpa za smještaj', 'Storage Bin deleted': 'Obrisana smještajna korpa', 'Storage Bin Details': 'Detalji korpe za smještaj', 'Storage Bin Number': 'Broj smještajne korpe', 'Storage Bin Type added': 'Dodana vrsta korpe za smještaj', 'Storage Bin Type deleted': 'Tip korpe za smještaj obrisan', 'Storage Bin Type Details': 'Detalji korpe za smještaj', 'Storage Bin Type updated': 'Korpa za smještaj ažurirana', 'Storage Bin Types': 'Vrste smještajnih korpi', 'Storage Bins': 'Korpe za smještaj', 'Storage Capacity (m3)': 'Kapacitet smještaja(m3)', 'Storage Location': 'Lokacije skladišta', 'Storage Location deleted': 'Lokacija skladišta obrisana', 'Storage Location ID': 'ID lokacije skladišta', 'Storage Location Name': 'Naziv lokacije skladišta', 'Storage Locations': 'Lokacija skladišta', 'Storage Type': 'Tip smještaja', 'Store spreadsheets in the Eden database': 'Sačuvaj proračunske tablice u Eden bazu podataka', 'Storeys at and above ground level': 'Spratovi na i iznad razine tla', 'Storm Force Wind': 'Olujni Vjetar', 'Storm Surge': 'Olujni val', 'Stowaway': 
'Slijepi putnik ', 'straight': 'pravo', 'Strategy': 'Strategija', 'Street (add.)': 'Ulica (adresa)', 'Street (continued)': 'Ulica (nastavljena)', 'Street Address': 'Adresa (ulica)', 'Street View': 'Prikaz ulica', 'Streetview Enabled?': 'Streetview omogućen?', 'String used to configure Proj4js. Can be found from %(url)s': 'String korišten za konfiguraciju Proj4js. Može se naći na %(url)s', 'Strong': 'Jako', 'Strong Wind': 'Jak vjetar', 'Structural': 'Strukturalno', 'Structural Hazards': 'Strukturne opasnosti', 'Style': 'Stil', 'Style Field': 'Polje stila', 'Style invalid': 'Neispravan stil', 'Style Values': 'vrijednosti stila', 'Sub Category': 'Potkategorija', 'Sub-type': 'Podtip', 'Subject': 'Tema', 'Submission Succesful': 'Predaja uspješna', 'Submission successful - please wait': 'Slanje uspješno - molimo pričekajte', 'Submission successful - please wait...': 'Podnesak uspješan- molimo pričekajte', 'Submit': 'Unesi', 'submit': 'unesi', 'Submit a request for recovery': 'Podnijeti zahtjev za oporavak', 'Submit New': 'Predaj novi', 'Submit New (full form)': 'Podnesite novi (potpuna forma)', 'Submit New (triage)': 'Navedi novi (trijaža)', 'Submit new Level 1 assessment (full form)': 'Potvrdi novo procjenjivanje prvog nivoa (potpuna forma)', 'Submit new Level 1 assessment (triage)': 'Proslijedi novu procjenu nivoa 1 (trijaža)', 'Submit new Level 2 assessment': 'Podnesi novu procjenu Nivoa 2', 'Submitting information about the individual such as identification numbers, physical appearance, last seen location, status, etc': 'Slanje informacija o pojedincima, poput identifikacionih brojeva, fizičkog izgleda, mjesta gdje je zadnji put viđen, status itd', 'Subscribe': 'Pretplati se', 'Subscription added': 'Dodan potpis', 'Subscription deleted': 'Pretplata obrisana', 'Subscription Details': 'Detalji pretplate', 'Subscription updated': 'Pretplata izmjenjena', 'Subscriptions': 'Pretplate', 'Subscriptions Status': 'Status pretplate', 'Subsector': 'podsektor', 'Subsector added': 
'Podsektor dodat', 'Subsector deleted': 'Podsektor izbrisan', 'Subsector Details': 'Detalji o podsektoru', 'Subsector updated': 'Ažuriran podsektor', 'Subsectors': 'Podsektori', 'Subsistence Cost': 'Trošak opstanka', 'SubType of': 'Podtip od', 'Suburb': 'Predgrađe', 'Successfully registered at the repository.': 'Uspješno registrovano na repozitoriju.', 'suffered financial losses': 'uočeni finansijski gubici', 'Sufficient care/assistance for chronically ill': 'Dovoljna briga/pomoć za hronično bolesne', 'Suggest not changing this field unless you know what you are doing.': 'Predlažemo da ne vršite nikakve izmjene ovog polja, osim ako ne znate šta radite.', 'Summary': 'Sažetak', 'Summary by Administration Level': 'Sažetak na administrativnom nivou', 'Summary by Question Type - (The fewer text questions the better the analysis can be)': 'Sumarno po vrsti pitanja - (Što je manje tekstualnih pitanja, bolja je analiza)', 'Summary Details': 'Detalji sažetka', 'Summary of Completed Assessment Forms': 'Sažetak završenog formular ocjene', 'Summary of Incoming Supplies': 'Sumarno ulaz zaliha', 'Summary of Releases': 'Sumarno izlaz', 'Sunday': 'Nedjelja', 'Supervisor': 'Nadglednik', 'Supplier': 'Dobavljač', 'Supplier added': 'Dobavljač dodan', 'Supplier deleted': 'Dobavljač obrisan', 'Supplier Details': 'Detalji dobavljača', 'Supplier updated': 'Dobavljač ažuriran', 'Supplier/Donor': 'Dobavljač/donator', 'Suppliers': 'Dobavljači', 'Supply Chain Management': 'Upravljanje lancom zaliha', 'Supply Item Categories': 'kategorije predmeta za snadbjevanje.', 'Support Request': 'Zahtjev za podršku', 'Support Requests': 'Zahtjevi za podršku.', 'supports nurses in the field to assess the situation, report on their activities and keep oversight.': 'podrška medicinskim sestrama vezano za procjenu situacije, praćenje aktivnosti i nadzor', 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Podržava odlučivanje velikih grupa 
eksperata kriznog menadžmenta pomažući grupama da kreiraju rangirane liste', 'Sure you want to delete this object?': 'Da li ste sigurni da želite da obrišete ovaj objekat?', 'Surgery': 'Operacija', 'Suriname': 'Surinam', 'Surplus': 'Višak vrijednosti', 'Survey Answer': 'Odgovori na ankete', 'Survey Answer added': 'Dodat anketni odgovor', 'Survey Answer deleted': 'Izbrisan odgovor na istraživanje', 'Survey Answer Details': 'Detalji odgovora upitnika', 'Survey Answer updated': 'Odgovori anketa ažurirani', 'Survey Module': 'Modul istraživanja', 'Survey Name': 'Naziv istraživanja', 'Survey Question': 'Anketno pitanje', 'Survey Question added': 'Anketna pitanja dodana', 'Survey Question deleted': 'Pitanje ankete obrisano', 'Survey Question Details': 'Detalji pitanja ankete', 'Survey Question Display Name': 'Naslovno Ime pitanja ankete', 'Survey Question updated': 'Anketno pitanje ažurirano', 'Survey Section': 'Anketna sekcija', 'Survey Section deleted': 'Izbrisan odjeljak istraživanja', 'Survey Section Details': 'Detalji odjeljka ankete', 'Survey Section Display Name': 'Naslovno ime odjeljka ankete', 'Survey Section updated': 'Serija istraživanja ažurirana', 'Survey Series': 'Niz anketa', 'Survey Series added': 'Niz aketa dodan', 'Survey Series deleted': 'Serija anketa obrisana', 'Survey Series Details': 'Detalji toka ankete', 'Survey Series Name': 'Naziv niza anketa', 'Survey Series updated': 'Serija istraživanja ažurirana', 'Survey Template': 'Šablon za anketu', 'Survey Template added': 'Obrazac za Upitnik dodan', 'Survey Template deleted': 'Predložak ankete obrisan', 'Survey Template Details': 'Detalji predloška ankete', 'Survey Template updated': 'Šablon za anketu ažuriran', 'Survey Templates': 'Šabloni anketa', 'Surveys': 'Istraživanja', 'Swaziland': 'Svazilend', 'Sweden': 'Švedska', 'Switch to 3D': 'Prebaci na 3D', 'Switzerland': 'Švajcarska', 'Symbologies': 'Značenje simbola', 'Symbology': 'Značenje simbola', 'Symbology added': 'Značenje simbola dodano', 
'Symbology deleted': 'Značenje simbola obrisano', 'Symbology Details': 'Detalji značenja simbola', 'Symbology removed from Layer': 'Značenja simbola uklonjena iz sloja', 'Symbology updated': 'Značenje simbola ažurirano', 'Sync Conflicts': 'Konflikti sinkronizacije', 'Sync History': 'Historija sinhronizovanja', 'Sync Now': 'Sinhroniziraj sad', 'Sync Partners': 'Sinhronizuj partnere', 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Sync Partneri su instance ili saradnici (SahanaEden, SahanaAgasti, Ushahidi, itd.) s kojima želite usklađivati ​​podatke . Kliknite na link na desnoj strani da odete na stranicu na kojoj možete dodati sinhronizacijskog partnera, tražiti sinhronizacijske partnere i mijenjati ih.', 'Sync Password': 'Sinhronizacija lozinki', 'Sync Policy': 'Politika sinhronizacije', 'Sync Pools': 'Sinhronizacija grupisanja', 'Sync process already started on': 'Sinhronizacija procesa već započeta ', 'Sync process already started on ': 'Proces sinhronizacije je već započeo ', 'Sync Schedule': 'Sinkronizirati raspored', 'Sync Schedules': 'Sinhronizacija rasporeda', 'Sync Settings': 'Postavke sikronizacije', 'Sync Settings updated': 'Postavke sinhronizacije su ažurirane', 'Sync Username': 'Sinhronizuj korisničko ime', 'Synchronisation': 'Sinhronizacija', 'Synchronisation - Sync Now': 'Sinhronizacija - sinhronizuj sada', 'Synchronisation History': 'Istorija sinhronizacije', 'Synchronization': 'Usklađivanje', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. 
This page provides you with information about how to use the synchronization features of Sahana Eden': 'Sinhronizacija vam omogućuje da dijelite vaše podatke sa ostalima i ažurirate vlastitu bazu podataka sa najnovijim podacima od ostalih učesnika. Ova stranica vam pruža informacije o tome kako koristiti pogodnosti Sahana Eden sinhronizacije.', 'Synchronization Conflicts': 'Sinhronizacijski konflikti', 'Synchronization currently active - refresh page to update status.': 'Sinhronizacija trenutno aktivna - osvježite stranicu da ažurirate status.', 'Synchronization Details': 'Detalji sinhronizovani', 'Synchronization History': 'Historija sinhronizacije', 'Synchronization Job': 'Posao sinhronizacije', 'Synchronization Log': 'Zapisnik sinhronizacije', 'Synchronization mode': 'Režim sinhronizacije', 'Synchronization not configured': 'Sinhronizacija nije konfigurisana', 'Synchronization not configured.': 'Sinhronizacija nije konfigurisana', 'Synchronization Peers': 'Sinhronizacijski saradnici', 'Synchronization Schedule': 'Raspored sinhronizacije', 'Synchronization Settings': 'Postavke za sinhronizaciju', 'Synchronization settings updated': 'Sinhronizacijske postavke ažurirabne', 'Syncronisation History': 'Historija sinhronizacije', 'Syria': 'Sirija', 'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'Sistem prati sve volontere koji rade u području katastrofe. 
Snima ne samo mjesta gdje su aktivni , već također snima podatke u dometu usluga koje se pružaju u svakom području.', "System's Twitter account updated": 'Twitter nalog sistema je ažuriran', 'São Tomé and Príncipe': 'São Tomé i Príncipe', 'Table': 'Tabela', 'table': 'tabela', 'Table name of the resource to synchronize': 'Ime tabele s resursom za sinhronizaciju', 'Table Permissions': 'Dozvole tabele', 'table_name': 'naziv_tabele', 'Tablename': 'Ime tabele', 'Tag': 'Oznaka', 'Tag added': 'Oznaka dodana', 'Tag deleted': 'Oznaka obrisana', 'Tag Details': 'Detalji oznake', 'Tag Post': 'Stavljanje oznake', 'Tag removed': 'Oznaka uklonjena', 'Tag updated': 'Oznaka ažurirana', 'Tags': 'Oznake', 'Taiwan': 'Tajvan', 'Tajikistan': 'Tadžikistan', 'Take shelter in place or per <instruction>': 'Pronađi utočiste u mjestu ili prema <instruction>', 'tall': 'visok', 'Tanzania': 'Tanzanjia', 'Task': 'Zadatak', 'Task added': 'Dodan zadatak', 'Task deleted': 'Obrisan zadatak', 'Task Details': 'Detalji o zadatku', 'Task List': 'Lista zadataka', 'Task removed': 'Zadatak uklonjen', 'Task Status': 'Status zadatka', 'Task updated': 'Ažuriran zadatak', 'Tasks': 'Zaduženja', 'tattooed': 'tetoviran', 'Team': 'Tim', 'Team added': 'Dodan tim', 'Team deleted': 'Obrisan tim', 'Team Description': 'Opis tima', 'Team Details': 'Detalji tima', 'Team Head': 'Vođa time', 'Team ID': 'ID tima', 'Team Leader': 'Vođa tima', 'Team Member added': 'Član grupe dodan', 'Team Members': 'Članovi tima', 'Team Name': 'Naziv tima', 'Team Type': 'Tip tima', 'Team updated': 'Ažuriran tim', 'Teams': 'Timovi', 'technical failure': 'tehnički neuspjeh', 'Technical Support Vehicle': 'Vozila za tehničku podršku', 'Technical testing only, all recipients disregard': 'Samo tehničko ispitivanje, bez obzira na sve primaoce', 'Teeth': 'Zubi', 'Teeth, Dentures': 'Zubi, proteze', 'Teeth, Gaps between front teeth': 'Zubi, razmak između prednjih zuba', 'Teeth, Missing teeth': 'Zubi, nedostajući zubi', 'Teeth, Toothless': 'Zubi, 
bezub', 'Telecommunications': 'Telekomunikacije', 'Telephone': 'telefon', 'Telephone Details': 'Telefonski detalji', 'Telephony': 'Telefonija', 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Naređuje GeoServeru da uradi MetaTiling što smanjuje broj dupliciranih labela.', 'Temp folder %s not writable - unable to apply theme!': 'Privremeni direktorij %s nije za pisanje - nemoguce staviti temu!', 'Template': 'Predložak', 'Template file %s not readable - unable to apply theme!': 'Datoteka predložaka %s nije čitljiva - ne može se primijeniti tema!', 'Template Name': 'Naziv predloška', 'Template Section added': 'Dodan odjeljak predloška', 'Template Section deleted': 'Obrisan odjeljak predloška', 'Template Section Details': 'Detalji odjeljka predloška', 'Template Section updated': 'Ažuriran odjeljak predloška', 'Template Sections': 'Odjeljci predloška', 'Template Summary': 'Rezime predloška', 'Templates': 'Predlošci', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Termin za peti nivo adminstrativne podjele unutar zemlje (npr. glasačko mjesto). Ovaj nivo se ne koristi često.', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Termin za administrativnu podjelu unutar zemlje na četvrtom nivou (Mjesna zajednica)', 'Term for the primary within-country administrative division (e.g. State or Province).': 'Termin koji se koristi za prvi nivo administrativne podjele (Entitet/Distrikt)', 'Term for the secondary within-country administrative division (e.g. District or County).': 'Termin za administrativnu podjelu drugog nivoa (Kanton/Regija)', 'Term for the third-level within-country administrative division (e.g. City or Town).': 'Termin za administrativnu jedinicu trećeg nivoa (Općina/Opština).', 'Term for the top-level administrative division (i.e. 
Country).': 'Termin za administrativnu podjelu najvišeg nivoa (Država)', 'Terms of Service': 'Uslovi korištenja', 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.': 'Uvjeti korištenja\n\nMorate biti osamnaest ili preko osamnaest da bi bili volonter.', 'Terms of Service\r\n\r\nYou have to be eighteen or over to register as a volunteer.': 'Uvjeti korištenja\r\n\r\nMorate biti osamnaest ili preko osamnaest da bi bili volonter.', 'Terms of Service:': 'Uslovi korištenja:', 'Territorial Authority': 'Teritorijalni autoritet', 'Terrorism': 'Terorizam', 'Tertiary Server (Optional)': 'Tercijarni server (Nije obavezno)', 'Text': 'Tekst', 'Text before each Text Field (One per line)': 'Tekst ispred svakog tekstualnog polja (jedan po redu)', 'Text Colour for Text blocks': 'Boja teksta za tekst blokova', 'Text Direction': 'Smijer teksta', 'Text in Message:': 'Tekst u poruci:', 'Thailand': 'Tajland', 'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Hvala na potvrdi svog email-a. Vaš korisnički račun čeka na odobrenje administratora (%s). Dobit ćete obavještenje email-om kad vam račun bude aktiviran.', 'Thanks for your assistance': 'Hvala na Vašoj pomoći', 'The': '!', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"Upit" je uslov poput "db.tablela1.polje1==\'vrijednost\'". Nešto poput "db.tabela1.polje1 == db.tabela2.polje2" kao rezultat daje SQL JOIN (spajanje).', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Pitanje" je stanje poput "db.tablela1.polje1==\'vrijednost\'". 
Nešto poput "db.tabela1.polje1 == db.tabela2.polje2" kao rezultat daje SQL JOIN (spajanje).', 'The answers are missing': 'Nedostaju odgovori', 'The area is': 'Površina je', 'The Area which this Site is located within.': 'Podrucje u kojem se nalazi zadano mjesto', 'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Modul procjene čuva predloške procjene i omogućava odgovore na procjene za specifične događaje da se sakupe i analiziraju', 'The Assessments module allows field workers to send in assessments.': 'Modul procjena omogućava radnicima na terenu da pošalju procjene.', 'The asset must be assigned to a site OR location.': 'Sredstvo mora biti dodjeljeno mjestu ILI lokaciji', 'The attribute used to determine which features to cluster together (optional).': 'Atribut koji se koristi za određivanje koje se karakteristike zajedno spajaju (opciono).', 'The attribute which is used for the title of popups.': 'Atribut koji se koristi za naslove popup-a.', 'The attribute within the KML which is used for the title of popups.': 'Atribut unutar KML koji se koristi za titulu iskočnih prozora.', 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KML atribut(i) korišteni za tijelo iskočnih prozora. (atribute razdvojiti praznim znakom)', 'The Author of this Document (optional)': 'Autor ovog dokumenta (opcionalno)', 'The Bin in which the Item is being stored (optional).': 'Korpa u kojoj je stavka smještena (opciono)', 'The body height (crown to heel) in cm.': 'Visina (od glave do pete) u cm.', 'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Modul za procjenu zgrada omogućava da se ocjeni sigurnost zgrade , n.p.r. 
poslje zemljotresa.', 'The Camp this person is checking into.': 'Kamp u koji se ova osoba prijavljuje.', 'The Camp this Request is from': 'Kamp iz koga zahtjev potiče', 'The category of the Item.': 'Kategorija stavke', 'The client ID to use for authentication at the remote site (if required for this type of repository).': 'Klijentski ID za autentifikaciju na udaljenom sajtu (ako je potrebno za ovu vrstu repozitorija)-', 'The client secret to use for authentication at the remote site (if required for this type of repository).': 'Klijentska tajna šifra potrebna za autentifikaciju na udaljenom sajtu (ako je potrebno za ovu vrstu repozitorija).', 'The country the person usually lives in.': 'Država u kojoj osoba živi.', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Trenutna lokacija osobe/grupe, koja može biti generalna(za izvještaje) ili precizna(za prikazivanje na mapi). Unesite nekoliko znakova da pretražite dostupne lokacije.', 'The default Facility for which this person is acting.': 'Zadani objekt za koje data osoba djeluje.', 'The default Facility for which you are acting.': 'Podrazumjevani objekat za koji djelujete.', 'The default Organization for whom this person is acting.': 'Predefinirana organizacija za koju ova osoba djeluje.', 'The default Organization for whom you are acting.': 'Podrazumijevana organizacija za koju djelujete', 'The default policy for data import from this peer.': 'Predefinirana pravila za uvoz podataka od ovog suradnika', 'The descriptive name of the peer.': 'Opisni naziv suradnika', 'The District for this Report.': 'Geografsko područje za ovaj izvještaj', "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Donator(i) za ovaj projekat. 
Mogu se odabrati višestruke vrijednosti, držanjem pritisnute 'Control' ('Ctrl') tipke.", 'The duplicate record will be deleted': 'Dupli zapis će biti obrisan', 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'E-mail adresa na koju su poslani zahtjevi za odobrenjem (obično će ovo biti grupni mail umjesto individualnog). Ako je polje prazno, onda se zahtjevi automatski odobravaju ako se domena podudara.', 'The facility where this position is based.': 'Objekat na kom je ova pozicija bazirana.', 'The first or only name of the person (mandatory).': 'Ime ili jedino ime osobe (obavezno)', 'The following %(new)s %(resource)s have been added': 'Sljedeći %(new)s %(resource)s je dodan', 'The following %(upd)s %(resource)s have been updated': 'Sljedeći %(upd)s %(resource)s je ažuriran', 'The following %s have been added': 'Sljedeće %s je dodano', 'The following %s have been updated': 'Sljedeće %s je ažurirano', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Forma URL-a je http://your/web/map/service?service=WMS&request=GetCapabilities gdje vasa your/web/map/service predstavlja URL stazu za WMS', 'The Gambia': 'Gambia', 'The Group whose members can edit data in this record.': 'Grupa čiji članovi mogu uređivati podatke u ovom zapisu', 'The hospital this record is associated with.': 'Bolnica s kojom je zapis povezan', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Sistem za prijavu incidenata omogućuje javnosti da prijave incidente i da ih prati.', 'The language to use for notifications.': 'Jezik korišten za napomene', 'The language you wish the site to be displayed in.': 'Jezik u kojem želite da stranica bude prikazana.', 'The last known location of the missing 
person.': 'Zadnja poznata lokacija za nedostajuću osobu', 'The length is': 'Dužina je', 'The level at which Searches are filtered.': 'Nivo na kom su filtrirane pretrage', 'The list of Brands are maintained by the Administrators.': 'Listu marki održavaju administratori.', 'The list of Catalogs are maintained by the Administrators.': 'Listu kataloga održavaju administratori.', 'The list of Item categories are maintained by the Administrators.': 'Lista kataloga stavki koju održavaju administratori.', 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'Lokacija ovog mjesta, koja može biti općenita (za izvještaje) ili precizna (za prikaz na mapi). ', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Lokacija odakle osoba dolazi, koja može biti generalna (za izvještavanje) ili precizna (za prikaz na mapi). Unesite nekoliko početnih karaktera za pretragu dostupnih lokacija.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Lokacija na koju osoba ide, koja može biti općenita (za izvještaje) ili precizna (za prikaz na mapi). 
Unesi nekoliko karaktera za pretragu dostupnih lokacija.', 'The map will be displayed initially with this latitude at the center.': 'Mapa će biti prikazana inicijalno, sa ovom geografskom širinom u centru.', 'The map will be displayed initially with this longitude at the center.': 'Karta će biti prvobitno predstavljena sa ovom geografskom dužinom u centru.', 'The Maximum valid bounds, in projected coordinates': 'Maksimalne važeće granice u projektovanim koordinatama', 'The Media Library provides a catalog of digital media.': 'Media Library pruža kataloge digitalnih medija', 'The Media Library provides a catalogue of digital media.': 'Media Library pruža kataloge digitalnih medija', 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Modul za slanje poruke je glavni dio za komunikaciju Sahana sistema. Koristi se za slanje upozorenja/ili poruka koristenjem SMS ili e-maila razlicitim grupama i osoboma, tokom ili poslije nepogode.', 'The minimum number of features to form a cluster.': 'Najmanji broj karakteristika za formiranje skupa.', 'The minimum number of features to form a cluster. 0 to disable.': 'Najmanji broj karakteristika za formiranje skupa. 
0 za isključiti-', 'The name to be used when calling for or directly addressing the person (optional).': 'Naziv koji se koristi kada se poziva ili neposredno obraća osobi (opcionalno).', 'The next screen will allow you to detail the number of people here & their needs.': 'Sljedeći ekran će vam omogućiti da opišete broj ljudi ovdje i njihove potrebe.', 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': 'Sljedeći ekran će vam omogućiti da unesete detaljan spisak objekata i količina, ako odgovara...', 'The number of pixels apart that features need to be before they are clustered.': 'Koliko piksela oznake karakteristika trebaju biti razdvojene prije njihovog grupisanja', 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Broj pločica oko vidljive karte za preuzimanje. Nula znači da de 1. stranica učita brže, veće brojke čine da je naknadno paniranje brže.', 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Broj mjernih jedinica alternativnih stavki koji je jednak jednoj mjernoj jedinici stavke', 'The Office this record is associated with.': 'Ured s kojom je zapis povezan', 'The Organisation which is funding this Activity.': 'Organizacija koja osniva ou aktivnost', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'Registar organizacija zadržava zapise svih potpornih organizacija u radnoj oblasti.', 'The Organization this record is associated with.': 'Organizacija s kojom je zapis povezan', 'The Organization which is funding this Activity.': 'Organizacija koja osniva ovu aktivnost', 'The parse request has been submitted': 'Zahtjev za analizu je bio podnesen', 'The Patient Tracking system keeps track of all the evacuated patients & their relatives.': 'Sistem Praćenja Pacijenta prati sve evakuisane pacijente i njihove 
porodice.', 'The person at the location who is reporting this incident (optional)': 'Osoba na lokaciji koja prijavljuje ovaj incident (neobavezno)', 'The Person currently filling this Role.': 'Osoba koja trenutno obavlja ovu ulogu', 'The person reporting about the missing person.': 'Osoba koja je prijavila nestanak osobe', 'The person reporting the missing person.': 'Osoba koja je prijavila nestalu osobu', "The person's manager within this Office/Project.": 'Rukovodilac osobe u ovom uredu/projektu', 'The poll request has been submitted, so new messages should appear shortly - refresh to see them': 'Zahtjev za anketom je podnesen, pa bi se nove poruke uskoro trebale pojaviti - osvježite da ih vidite', 'The POST variable containing the phone number': 'POST varijabla koja sadrži telefonski broj', 'The post variable containing the phone number': 'Postavljena varijabla koja sadrži telefonski broj', 'The post variable on the URL used for sending messages': 'Post varijabla u URL koja se koristi za slanje poruka', 'The POST variable on the URL used for sending messages': 'POST varijabla u URL koja se koristi za slanje poruka', 'The post variables other than the ones containing the message and the phone number': 'Varijable objave različite od onih koje sadrže poruku i broj telefona', 'The POST variables other than the ones containing the message and the phone number': 'POST varijable različite od onih koje sadrže poruku i broj telefona', "The Project module can be used to record Project Information and generate Who's Doing What Where reports.": 'Projektni modul se može koristiti za zapis projektnih informacija i generisanje izvještaja "Ko šta radi gdje?".', 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Modul za praćenje projekta omogućuje stvaranje aktivnosti kako bi se ispunile praznine u procjeni potreba.', "The provided 'formuuid' is invalid. 
You have selected a Form revision which does not exist on this server.": "Navedeni 'formuuid' je nevažeći. Odabrali ste reviziju forme koja ne postoji na ovom serveru.", "The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": "Navedeni 'jobuuid' je nevažeći. Sesija postavljanja formulara je nevažeća. Trebate ponoviti postavljanje.", 'The Rapid Assessments Module stores structured reports done by Professional Organisations.': 'Modul brze procjene čuva struktuirane izvještaje koje obavljaju profesionalne organizacije', 'The Rapid Assessments Module stores structured reports done by Professional Organizations.': 'Modul brze procjene čuva struktuirane izvještaje koje obavljaju profesionalne organizacije', 'The request this record is associated with.': 'Zahtjev s kojim je ovaj zapis povezan', 'The Request this record is associated with.': 'Zahtjev s kojim je ovaj zapos povezan', 'The Role this person plays within this hospital.': 'Uloga koju ova osoba ima u ovoj bolnici.', 'The Role this person plays within this Office/Project.': 'Uloga koju ova osoba ima u ovom uredu/projektu', 'The Role to which this Role reports.': 'Uloga za koju se ova uloga izvještava.', 'The scanned copy of this document.': 'Skenirana kopija ovog dokumenta', 'The search request has been submitted, so new messages should appear shortly - refresh to see them': 'Zahtjev za pretragom je podnesen, pa bi se nove poruke ubrzo trebale pojaviti. Osvježite da ih vidite.', 'The search results are now being processed with KeyGraph': 'Rezultate pretrage trenutno obrađuje KeyGraph', 'The search results should appear shortly - refresh to see them': 'Rezultati pretrage će se uskoro pojaviti - osvježite da ih vidite', 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Serijski port na koji je modem priključen - npr. 
/dev/ttyUSB0 na linuxu i com1,com2 na Windowsu', 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Server nije primio pravovremeni odgovor od drugog servera, kojem je pristupao da bi popunio zahtjev od strane pretraživača.', 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Server je dobio pogrešan odgovor od drugog servera da je pristupio popunjavanju zahtjeva od strane browsera.', 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Registar skloništa prati sva skloništa i pohranjuje osnovne detalje o njima. U saradnji sa ostalim modulima prati ljude u skloništu, dostupne usluge itd.', 'The Shelter this person is checking into.': 'Sklonište u koje se prijavljuje ova osoba', 'The Shelter this Request is from': 'Sklonište iz kog je ovaj zahtjev', 'The Shelter this Request is from (optional).': 'Sklonište iz kog je ovaj zahtjev (opciono)', 'The site where this position is based.': 'Stranica na kojoj je ova pozicija bazirana.', 'The Source this information came from.': 'Izvor odakle je došla ova informacija', "The staff member's official job title": 'Zvanično radno mjesta člana osoblja', 'The staff responsibile for Facilities can make Requests for assistance.': 'Osoblje odgovorno za karakteristike može načiniti zahtjeve za pomoć.', 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Osoblje zaduženo za ustanove može zahtijevati pomoć. 
Obaveze se mogu vršiti nesaglasno sa tim zahtjevima, ali oni ostaju otvoreni sve dok onaj ko je izdao zahtjev ne potvrdi da je on ispunjen.', 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Dotični događaj ne predstavlja više prijetnju niti zabrinjava i svaka slijedeća akcija je objašnjena u <instruction>', 'The subject of the alert (optional)': 'Tema ', 'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'Sinhronizacijski modul omogućava sinhronizaciju podataka između kopija Sahana Eden.', 'The system supports 2 projections by default:': 'Sistem podržava 2 projekcije podrazumijevano:', 'The time at which the Event started.': 'Vrijeme u koje je događaj počeo.', 'The time at which the Incident started.': 'Vrijeme u koje je incident počeo.', 'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'Vremenska razlika između UTC i vaše vremenske zone, navesti kao +HHMM za istočne ili -HHMM za zapadne vremenske zone.', 'The title of the page, as seen in the browser (optional)': 'Naslov stranice kakav se vidi u browser programu (opciono)', 'The token associated with this application on': 'Token povezan s ovom aplikacijom na', 'The Tracking Number %s ""is already used by %s.': 'Broj praćenja %s "" je već u upotrebi od strane %s.', 'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Jedinstveni identifikator koji je pridružen ovom objektu od strane vlade', 'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': 'Jedinstveni identifikator saradnika. 
Ostavite prazno ako suradnik nie Sahana Eden instanca, u tom slučaju će biti automatski dodijeljeno.', 'The unique identifier which identifies this instance to other instances.': 'Jedinstveni identifikator koji razlikuje ovu instancu od ostalih.', 'The uploaded Form is unreadable, please do manual data entry.': 'Poslani formular je nečitljiv, molim obavite ručni unos podataka.', 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'URL za GetCapabilities stranicu Web Map Service (WMS) čiji slojevi su dostupni na mapi.', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'URL za GetCapabilities stranicu Web Map Service (WMS) čiji slojevi su dostupni na pregledničkom panelu mape.', "The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'URL datoteke slike. Ako ne dodate sliku, morate specificirati lokaciju ovdje.', 'The URL of your web gateway without the POST parameters': 'URL Vašeg web prolaza bez POST parametara', 'The URL of your web gateway without the post parameters': 'URL Vašeg web izlaza bez poštanskih parametara', 'The URL to access the service.': 'URL za pristup usluzi.', "The volunteer's role": 'Volonterska uloga', 'The way in which an item is normally distributed': 'Način na koji je stavka normalno distribuirana', 'The weight in kg.': 'Težina u kilogramima.', 'Theme': 'Tema', 'Theme added': 'Tema dodana', 'Theme added to Activity': 'Tema dodana u aktivnost', 'Theme added to Project': 'Tema dodana u projekat', 'Theme added to Project Location': 'Tema dodana u lokaciju projekta', 'Theme Data': 'Podaci teme', 'Theme Data deleted': 'Podaci teme obrisani', 'Theme Data updated': 'Podaci teme ažurirana', 'Theme deleted': 'Tema obrisana', 'Theme Details': 'Tematski detalji', 'Theme Layer': 'Sloj teme', 'Theme removed from Activity': 'Tema uklonjena iz aktivnosti', 'Theme removed from 
Project': 'Tema uklonjena iz projekta', 'Theme removed from Project Location': 'Tema uklonjena iz lokacije projekta', 'Theme updated': 'Tema ažurirana', 'Themes': 'Teme', 'There are errors': 'Postoje greške', 'There are insufficient items in the Inventory to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are more than %(max)s results, please input more characters.': 'Ima više od %(max)s rezultata, molim unesite više znakova.', 'There are multiple records at this location': 'Ima više zapisa na ovoj lokaciji', 'There are no contacts available for this person!': 'Nema kontakta dostupnih za ovu osobu!', "There are no details for this person yet. Add Person's Details.": 'Nema detalja za ovu osobu. Dodajte detalje osobe', 'There are not sufficient items in the Inventory to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are not sufficient items in the store to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are too many features, please Zoom In or Filter': 'Ima previše karakteristika, uvećajte sliku ili filtrirajte', 'There is insufficient data to draw a chart from the questions selected': 'Nedovoljno je podataka za iscrtavanje dijagrama iz izabranih pitanja', 'There is no address for this person yet. Add new address.': 'Još ne postoji adresa za ovu osobu. Dodaj novu adresu.', 'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': 'Još nema statusa za %(site_label)s. 
Dodajte %(site_label)s status.', 'There was a problem, sorry, please try again later.': 'Došlo je do problema, ispričavamo se, pokušajte ponovno kasnije.', 'These are settings for Inbound Mail.': 'Ovo su postavke za Inbound Mail', 'These are the filters being used by the search.': 'Postoje filteri korišteni pretragom.', 'These are the Incident Categories visible to normal End-Users': 'Ovo su kategorije slučajeva, vidljive običnim krajnjim korisnicima.', 'These need to be added in Decimal Degrees.': 'Moraju biti uneseni u decimalnim stepenima.', 'They': 'Oni', 'thick': 'debelo', 'thin': 'tanko', 'this': 'ova', 'This adjustment has already been closed.': 'Ovo prilagođenje je već zatvoreno', 'This appears to be a duplicate of': 'Ovo je duplikat od', 'This appears to be a duplicate of ': 'Ovo izgleda kao duplikat od ', 'This email address is already in use': 'Ova email adresa je već u upotrebi', 'This email-address is already registered.': 'Ova email adresa je već registrovana', 'This file already exists on the server as': 'Ovaj fajl vec postoji na serveru kao', 'This form allows the administrator to remove a duplicate location.': 'Ovaj formular omogućava administratoru da ukloni duple lokacije.', 'This Group has no Members yet': 'Ova grupa još nema članova', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Ovo je prikladno ako je ovaj nivo u izgradnji. 
Da bi se spriječile slučajne modifikacije nakon što se ovaj nivo završi, ovo se može postaviti na Netačno', 'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'Ovo se normalno uređje koristeći grafičke kontrole u kartici stilova, svojstava sloja na mapi.', 'This is required if analyzing with KeyGraph.': 'Ovo je potrebno ako se analizira s KeyGraph.', 'This is the full name of the language and will be displayed to the user when selecting the template language.': 'Ovo je puno ime jezika i bit će prikazano korisniku kada se odabira jezik predloška.', 'This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.': 'Ovo je kratko ime jezika i bit će korišteno kao ime datoteke. Ovo treba biti ISO 639 šifra,', 'This is the way to transfer data between machines as it maintains referential integrity.': 'Ovo je način za prenos podataka između mašina, jer održava referencijalni integritet.', 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Ovo je način prenosa podataka između mašina koje održavaju referencijalni integritet... 
Duplicirane datoteke bi prvo trebalo ručno ukloniti!', "This isn't visible to the published site, but is used to allow menu items to point to the page": 'Ovo nije vidjivo objavljenom sajtu, ali se koristi da se dopusti stavkama menija da pokazuju na stranicu', "This isn't visible to the recipients": 'Ovo nije vidljivo primaocima', 'This job has already been finished successfully.': 'Ovaj posao je već uspješno završen', 'This level is not open for editing.': 'Ovaj nivo nije otvoren za izmjene.', 'This might be due to a temporary overloading or maintenance of the server.': 'Ovo može biti zbog privremenog opterećenja ili održavanja servera.', 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Ovaj modul omogućava stavke iz inventara da budu zahtjevane i dostavljene između različitih objekata.', 'This module allows Warehouse Stock to be managed, requested & shipped between the Warehouses and Other Inventories': 'Ovaj modul omogućava da se zalihe skladišta održavaju, zahtijevaju i isporučuju između skladišta i drugih mjesta zaliha', 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Ovaj modul omogućuje upravljanje događajima - bilo da su prethodno planirani (npr. vježbe) ili incidenti koji se trenutno odvijaju. Možete dodijeliti odgovarajuća sredstva (ljude, alate i postrojenja), tako da oni mogu biti lako mobilizirani.', 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Ovaj modul dopušta planiranje scenarija za vježbe i događaje. Možete alocirati prikladne resurse (ljudstvo, sredstva i objekte) tako da mogu lako mobilizirati.', 'This page shows you logs of past syncs. 
Click on the link below to go to this page.': 'Ova stranica prikazuje zapisnike prethodnih sinhronizacija. Kliknite na link ispod, kako biste ušli na ovu stranicu.', 'This resource is already configured for this repository': 'Resurs je već konfigurisan za ovaj repozitorij', 'This role can not be assigned to users.': 'Ova uloga se ne može dodijeliti korisnicima', 'This screen allows you to upload a collection of photos to the server.': 'Ovaj prozor Vam dozvoljava da uploadujete kolekciju slika na server.', 'This setting can only be controlled by the Administrator.': 'Ovo podešavanje može kontrolisati samo administrator.', 'This shipment contains %s items': 'Ova dostava sadrži %s stavki', 'This shipment contains one line item': 'Ova isporuka ima stavku od jedne linije', 'This shipment has already been received & subsequently canceled.': 'Ova isporuka je već bila primljena i odmah otkazana.', 'This shipment has already been received.': 'Ova dostava je već primljena.', 'This shipment has already been sent.': 'Ova dostava je već poslana.', 'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Ova isporuka nije poslana - NIJE otkazana zato što još uvijek može biti preuređena.', 'This shipment has not been returned.': 'Ova isporuka nije vraćena', 'This shipment has not been sent - it cannot be returned because it can still be edited.': 'Ova isporuka nije bila poslana - ne može se vratiti jer ju je još moguće mijenjati.', 'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Ova isporuka nije poslana - NIJE otkazana zato što još uvijek može biti preuređena.', 'This shipment will be confirmed as received.': 'Ova isporuka bit će potvrđena prilikom prijema.', 'This should be an export service URL, see': 'Ovo treba biti izvozni URL servisa, vidi', 'This Team has no Members yet': 'Ovaj tim još nema članova', 'Thunderstorm': 'Grmljavina', 'Thursday': 'Četvrtak', 'Ticket': 'Kartica', 'Ticket 
added': 'Dodana kartica', 'Ticket deleted': 'Kartica je poništena', 'Ticket Details': 'Pojedinosti kartica', 'Ticket ID': 'ID kartice', 'Ticket updated': 'Kartica izmjenjena', 'Ticketing Module': 'Modul sa karticama', 'Tickets': 'Kartice', 'Tiled': 'popločano', 'Tilt-up concrete': 'Ispupčen beton', 'Timber frame': 'Okvir od dasaka', 'Time': 'Vrijeme', 'Time Actual': 'Stvarno vrijeme', 'Time at which data was exchanged.': 'Vrijeme u konme su podaci razmijenjeni', 'Time Estimate': 'Procjena vremena', 'Time Estimated': 'Potrebno vrijeme', 'Time Frame': 'Vremenski okvir', 'Time In': 'Vrijeme unutra', 'Time in Cache (h:m:s)': 'Vrijeme u kešu (h:m:s)', 'Time Log': 'Vremenski zapis', 'Time Log Deleted': 'Vremenski zapis izbrisan', 'Time Log Updated': 'Vremenski zapis ažuriran', 'Time Logged': 'Vrijeme prijave', 'Time needed to collect water': 'Vrijeme potrebno za sakupljanje vode', 'Time of Request': 'Vrijeme zahtjeva', 'Time Out': 'Vrijeme vani', 'Time Question': 'Vremensko pitanje', 'Time Taken': 'Potrošeno vrijeme', 'Timeline': 'Vremenska crta', 'Timeline Report': 'Izvještaj o vremenskom okviru', 'times': 'puta', 'times (0 = unlimited)': 'puta (0=neograničeno)', 'times and it is still not working. We give in. Sorry.': 'puta i još uvjek ne radi. Odustajemo. 
Žao nam je.', 'Times Completed': 'Puta završen', 'Timestamp': 'Vremenska oznaka', 'Timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Vremenske oznake se mogu povezati sa oznakama na fotografijama kako bi ih locirali na karti.', 'Title': 'Naslov', 'Title to show for the Web Map Service panel in the Tools panel.': 'Naziv koji će se prikazati za panel Usluge Web Mape u panelu Alati.', 'TMS Layer': 'TMS sloj', 'To': 'Za', 'To %(site)s': 'Za %(site)s', 'To access Sahana documentation, go to': 'Da pristupite Sahana dokumentaciji, idite na', 'to access the system': 'da pristupite sistemu', 'To begin the sync process, click the button on the right =>': 'Da se započne proces sinhronizacije, pritisnite dugme desno =>', 'To begin the sync process, click the button on the right => ': 'Da biste započeli proces sinhronizacije, kliknite na dugme desno => ', 'To begin the sync process, click this button =>': 'Da biste započeli proces sinhronizacije, pritisnite ovo dugme =>', 'To begin the sync process, click this button => ': 'Da biste započeli proces sinhronizacije, pritisnite ovo dugme => ', 'To create a personal map configuration, click': 'Da kreirate konfiguraciju lične mape, kliknite', 'To create a personal map configuration, click ': 'Za kreiranje konfiguracije lične mape, pritisnite ', 'to download a OCR Form.': 'da bi se skinula OCR forma.', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Za uređivanje OpenStreetMap, potrebno je urediti OpenStreetMap opcije u modelima/000_config.py', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'Da uredite OpenStreetMap, trebate promijeniti OpenStreetMap postavke u konfiguraciji mape', 'To Location': 'Prema lokaciji', 'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'Da pomjerite vremenski liniju: koristite točkić miša, tastere s strelicama ili grabite i 
prevucite vremensku liniju', 'To Organization': 'Za organizaciju', 'To Person': 'Za osobu', 'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'Da štampate ili dijelite mapu trebate uzeti sliku ekrana. Ako vam treba pomoć oko uzimanja slike ekrana pogledajte instrukcije za %(windows)s ili %(mac)s', 'to reset your password': 'da resetujete lozinku', 'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Za pretraživanje po nazivu posla, unesi bilo koji dio naziva. Mozete koristiti % kao džoker znak', "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Da biste izvršili pretragu po imenu osobe, unesite bilo koje od imena, srednjih imena ili prezimena, razdvojenih razmacima. Možete koristiti % kao znak koji će zamijeniti bilo koji karakter ili niz karaktera. Pritisnite 'Traži' bez ikakvog unosa da biste izlistali sve osobe.", 'To search for a body, enter the ID ""tag number of the body. You may use ""% as wildcard.': 'Da tražite tijelo, unesite ID "" broj oznake tijela. Možete koristiti ""% kao džoker.', "To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'Za traženje tijela, unesite ID tag broj tijela. Možete koristiti % kao dzoker. Pritisnite "Trazi" bez ulaza na popis svih tijela.', "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'Za traženje tijela, unesite ID oznaku tijela. Možete koristiti % kao dzoker. Pritisnite "Traži" bez ulaza na popis svih tijela.', "To search for a hospital, enter any of the names or IDs of the hospital, or the organisation name or acronym, separated by spaces. You may use % as wildcard. 
Press 'Search' without input to list all hospitals.": "Da biste potražili bolnicu, unesite bilo koje od imena ili IDova bolnice, ime organizacije ili njen akronim razdvojeno razmakom. Možete koristiti % kao zamjenske karaktere. Pritisnite 'Pretraži' bez unesenih stavki da izlistate sve bolnice.", "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Za pretragu bolnica, unesite bilo koje od imena ili pripadni broj bolnice, sa razmacima. Možete koristiti i % umjesto razmaka. Pritisnite 'Search' (traži) i bez nabrajanja svih bolnica .", "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Da tražite lokaciju, ukucajte ime. Možete koristiti % kao zamjenu. Pritisnite 'Search ' bez unosa da izlistate sve lokacije", "To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": "Da tražite člana unesite neki dio imena osobe ili grupu. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih članova.", "To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.": "Za pretragu pacijenta unesite ime, prezime ili srednje ime odvojene razmakom. Mozete koristiti % kao zamjenu. Pritisnite 'Pretraga' bez unesenih vrijednosti za ispis svih pacijenata.", 'To search for a person, enter any of the ""first, middle or last names and/or an ID ""number of a person, separated by spaces. ""You may use % as wildcard.': 'Da tražite osobu unesite "" prvo, srednje ime ili prezime i/ili ID "" broj osobe, razdvojen razmacima. ""Moćete koristiti % kao džoker.', "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. 
You may use % as wildcard. Press 'Search' without input to list all persons.": 'Da biste tragali za osobom, unesite bilo koje - ime, srednje ime ili prezime i broj lične karte osobe, odvojeno razmacima. Možete koristiti znak % umjesto džokera. Pritisnite "Traži" bez ulaza da vam izlista sve osobe.', "To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Da biste izvršili pretragu po imenu osobe, unesite bilo koje od imena, srednjih imena ili prezimena, razdvojenih razmacima. Možete koristiti % kao znak koji će zamijeniti bilo koji karakter ili niz karaktera. Pritisnite 'Search' (Pretraga) bez ikakvog unosa da biste izlistali sve osobe.", "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": 'Za traženje zahtjeva unesite neki tekst koji tražite. Možete koristiti % kao dzoker. Pritisnite "Traži" bez ulaza na popis svih tijela.', "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": 'Za traženje procjene ukucajte bilo koji dio broja kartice za procjenu. Možete koristiti % kao dzoker. 
Pritisnite "Trazi" bez ulaska u cijelu listu procjena.', 'To Site': 'Za mjesto', 'To submit a new job, use the': 'Da unesete novi posao, koristite', 'To variable': 'Do varijable', 'to verify your email': 'da potvrdite vaš email', 'ton': 'tona', 'tonsure': 'ćela s vijencom kose', 'Tools': 'Alatke', 'Tools & Equipment': 'Alati i oprema', 'total': 'ukupno', 'Total': 'Ukupno', 'Total # of Beneficiaries Reached': 'Ukupno # ciljanih korisnika dosegnuto', 'Total # of households of site visited': 'Ukupan broj posjećenih domaćinstava mjesta', 'Total # of Target Beneficiaries': 'Ukupno # ciljanih korisnika', 'Total Affected': 'Ukupno oštećenih', 'Total Annual Budget': 'Ukupni godišnji budžet', 'Total Beds': 'Ukupno kreveta', 'Total Beneficiaries': 'Ukupno korisnika', 'Total Cost': 'Ukupni trošak', 'Total Cost per Megabyte': 'Ukupan trošak po megabajtu', 'Total Cost per Minute': 'Ukupni troškovi po minuti', 'Total Dead': 'Ukupno mrtvih', 'Total Funding (Local Currency)': 'Ukupni fondovi (lokalna valuta)', 'Total Funding Amount': 'Ukupan iznos fonda', 'Total gross floor area (square meters)': 'Ukupna površina poda (u kvadratnim metrima)', 'Total Households': 'Ukupan broj domaćinstava', 'Total Injured': 'Ukupno povrijeđenih', 'Total Locations': 'Ukupno lokacija', 'Total Monthly': 'Ukupno mjesečno', 'Total Monthly Cost': 'Ukupni mjesečni trošak', 'Total Monthly Cost:': 'Ukupni mjesečni trošak:', 'Total Monthly Cost: ': 'Ukupni mjesečni trošak: ', 'Total No of Affectees (Including Students, Teachers & Others)': 'Ukupan broj obuhvaćenih (uključujući učenike, nastavnike i ostale)', 'Total No of Students (Primary To Higher Secondary) in the Total Affectees': 'Ukupan broj učenika (osnovne i srednje škole) od ukupno pogođenih', 'Total No of Teachers & Other Govt Servants in the Total Affectees': 'Ukupno u nastavi i drugim vladinim uslugama u ukupnom broju pogođenih', 'Total number of beds in this facility. 
Automatically updated from daily reports.': 'Ukupan broj kreveta u ovom objektu. Automatski se ažurira iz dnevnih izvještaja', 'Total number of beds in this hospital. Automatically updated from daily reports.': 'Ukupan broj kreveta u ovoj bolnici. Automatski se ažurira iz dnevnih izvještaja.', 'Total number of houses in the area': 'Ukupan broj kuća u području', 'Total Number of Resources': 'Ukupan broj resursa', 'Total number of schools in affected area': 'Ukupni broj škola u zahvaćenim područjima', 'Total One-time Costs': 'Ukupni jednokratni troškovi', 'Total Persons': 'Ukupan broj osoba', 'Total Population': 'Ukupna populacija', 'Total population of site visited': 'Ukupan broj stanovnika posjećenog mjesta', 'Total Records: %(numrows)s': 'Ukupno zapisa: %(numrows)s', 'Total Recurring Costs': 'Ukupni ponavljajući troškovi', 'Total Unit Cost': 'Totalni jedinični trošak', 'Total Unit Cost:': 'Ukupni jedinični trošak:', 'Total Unit Cost: ': 'Ukupna cijena jedinice: ', 'Total Units': 'Ukupno jedinica', 'Total Value': 'Ukupna vrijednost', 'Totals for Budget:': 'Ukupni iznos Budžeta:', 'Totals for Bundle:': 'Ukupno po paketu:', 'Totals for Kit:': 'Ukupni iznosi za komplet:', 'Tour added': 'Tura dodana', 'Tour Configuration': 'Struktura ture', 'Tour deleted': 'Tura obrisana', 'Tour Details': 'Detalji ture', 'Tour Name': 'Ime ture', 'Tour updated': 'Tura ažurirana', 'Tour User': 'Korisnik ture', 'Tourist Group': 'Grupa turista', 'Tours': 'Ture', 'Town': 'Grad', 'Traceback': 'Praćenje', 'Traces internally displaced people (IDPs) and their needs': 'Prati interno raseljene osobe i njihove potrebe', 'Tracing': 'Praćenje', 'Track': 'Praćenje', 'Track deleted': 'Praćenje obrisano', 'Track Details': 'Prati detalje', 'Track Shipment': 'Prati pošiljku', 'Track updated': 'Praćenje ažurirano', 'Track uploaded': 'Praćenje učitano', 'Track with this Person?': 'Pratiti sa ovom Osobom?', 'Trackable': 'Moguće pratiti', 'Tracking and Tracing of Persons and Groups': 'Praćenje osoba 
i grupa', 'Tracking of basic information on the location, facilities and size of the Shelters': 'Praćenje osnovnih informacija na lokaciji, ustanova i veličine skloništa.', 'Tracking of Patients': 'Praćenje pacijenata', 'Tracking of Projects, Activities and Tasks': 'Praćenje projekata, aktivnosti i dešavanja', 'Tracks': 'Staze', 'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Prati lokaciju, distribucije, kapacitet i podjelu žrtava u skloništima', 'Traffic Report': 'Izvještaj o prometu', 'Training': 'Obuka', 'Training added': 'Dodan trening', 'Training Course Catalog': 'Katalog o kursu treniranja', 'Training deleted': 'Obrisan trening', 'Training Details': 'Detalji treninga', 'Training Event': 'Događaj obuke', 'Training Event added': 'Događaj obuke dodan', 'Training Event deleted': 'Događaj obuke obrisan', 'Training Event Details': 'Detalji o događaju obuke', 'Training Event updated': 'Događaj obuke ažuriran', 'Training Events': 'Događaji obuke', 'Training Facility': 'Lokacija obuke', 'Training Hours (Month)': 'Sati obuke (mjesečno)', 'Training Hours (Year)': 'Sati obuke (godišnje)', 'Training Report': 'Izvještaj obuke', 'Training updated': 'Ažuriran trening', 'Trainings': 'Treninzi', 'Transfer': 'Prijenos', 'Transfer Ownership': 'Prijenos vlasništva', 'Transfer Ownership To (Organization/Branch)': 'Prebaci vlasništvo na (organizacija/ogranak)', 'Transit': 'Tranzit', 'Transit Status': 'Status tranzita', 'Transit. 
Status': 'Status tranzita', 'Transition Effect': 'Efekat tranzicije', 'Translate': 'Prevedi', 'Translated File': 'Prevedena datoteka', 'Translation': 'Prevod', 'Translation Functionality': 'Funkcionalnost prijevoda', 'Transnistria': 'Pridnjestrovska', 'Transparent?': 'Providno?', 'Transport Reference': 'Transportna referenca', 'Transportation assistance, Rank': 'Prevozna pomoć, stepen', 'Transportation Required': 'Prijevoz je potreban', 'Transported By': 'Prevoznik', 'Transported by': 'Prevoznik', 'Trauma Center': 'Centar za traume', 'Travel Cost': 'Troškovi putovanja', 'Treatments': 'Tretmani', 'Tree': 'Stablo', 'Trinidad and Tobago': 'Trinidad i Tobago', 'Tropical Storm': 'Tropska Oluja', 'Tropo Messaging Token': 'Tropo token za poruke', 'Tropo Settings': 'Tropo postavke', 'Tropo settings updated': 'Tropo postavke ažurirane', 'Tropo Voice Token': 'Tropo simbol glasa', 'Truck': 'Kamion', 'Try checking the URL for errors, maybe it was mistyped.': 'Pokušajte provjeriti greške u URL-u, možda je pogrešno napisan.', 'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Pokušajte sa pritiskom na dugme za osvježavanje/ponovo učitavanje ili ponovnim unosom URL u adresnoj traci.', 'Try refreshing the page or hitting the back button on your browser.': 'Pokušajte osvježiti stranicu ili pritisnuti dugme za povratak nazad u Vašem pregledniku.', 'Tsunami': 'Cunami', 'Tuesday': 'Utorak', 'Tugboat Capacity': 'Kapacitet skele', 'Tunisia': 'Tunis', 'Turkey': 'Turska', 'turned up': 'okrenut gore', 'turning grey': 'postaje sivo', 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy modul nije dostupan u radu sa tekućom verzijom Pythona - potrebna je instalacija non-Tropo Twitter podrške', 'Tweet deleted': 'Tweet izbrisan', 'Tweet Details': 'Tweet detalji', 'Tweeted By': 'Tweet obavio', 'Tweeted by': 'Tweet obavio', 'Tweeted on': 'Tweet datum', 'Tweeted On': 'Tweet datum', 'Twilio 
(Inbound)': 'Twilio (dolazni)', 'Twilio Setting added': 'Twilio postavke dodane', 'Twilio Setting deleted': 'Twilio postavke obrisane', 'Twilio Setting Details': 'Detalji Twilio postavki', 'Twilio Settings': 'Twilio postavke', 'Twilio settings updated': 'Twilio postavke ažurirane', 'Twilio SMS Settings': 'Twilio SMS postavke', 'Twilio SMS settings': 'Twilio SMS postavke', 'Twitter account updated': 'Twitter nalog ažuriran', 'Twitter ID or #hashtag': 'Twitter ID ili #hashtag', 'Twitter InBox': 'Twitter ulazni sandučić', 'Twitter Search': 'Pretraži Twitter', 'Twitter Search Queries': 'Opcije Twitter pretrage', 'Twitter Search Results': 'Rezultati Twitter pretrage', 'Twitter Settings': 'Postavke Twittera', 'Twitter Timeline': 'Twitter vremenska linija', 'Type': 'Tip', 'Type of cause': 'Tip uzroka', 'Type of Construction': 'Vrsta izgradnje', 'Type of place for defecation': 'Vrsta mjesta za obavljanje nužde', 'Type of Transport': 'Vrsta transporta', 'Type of water source before the disaster': 'Tipovi izvora vode prije nepogode', "Type the first few characters of one of the Participant's names.": 'Upiši prvih nekoliko slova imena jedne od osoba.', "Type the first few characters of one of the Person's names.": 'Upiši prvih nekoliko slova imena jedne od osoba.', "Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog.": "Navedite ime postojeće stavke kataloga ili kliknite 'Kreiraj stavku' da dodate novu stavku koja nije u katalogu.", 'Type the name of an existing catalog kit': 'Unesite ime postojećeg kompleta iz kataloga', "Type the name of an existing site OR Click 'Create Warehouse' to add a new warehouse.": "Kreirajte ime postojećeg mjesta ili kliknite na 'Kreiraj skladište' da dodate novo skladište.", 'Types': 'Tipovi', 'Types of health services available': 'Dostupni tipovi zdravstvene zaštite', 'Types of water storage containers available': 'Dostupni tipovi spremnika za vodu', 'UID': 'JIB', 'Ukraine': 'Ukrajina', 
'UN agency': 'UN agencija', 'Un-Repairable': 'Nepopravljiv', 'Unable to find sheet %(sheet_name)s in uploaded spreadsheet': 'Ne mogu naći list %(sheet_name)s u postavljenoj tablici', 'Unable to open spreadsheet': 'Ne mogu da otvorim tablicu', 'unable to parse csv file': 'ne mogu analizirati csv datoteku', 'Unable to parse CSV file or file contains invalid data': 'Ne mogu analizirati CSV datoteku ili datoteka sadrži nevažeće podatke', 'Unable to parse CSV file!': 'Nije moguće analizirati CSV dokument !', 'unapproved': 'neodobreno', 'Unassigned': 'Nedodijeljeno', 'Uncheck all': 'Skini sve oznake', 'uncheck all': 'poništi sve oznake', 'uncovered': 'nepokriveno', 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Pod kojim uslovima bi lokalni zapisi trebali biti ažurirani ako su također lokalno mijenjani nakon zadnje sinhronizacije', 'Under which conditions local records shall be updated': 'Pod kojim uslovima bi lokalni zapisi trebali biti ažurirani', 'Understaffed': 'nema dovoljno zaposlenih', 'Unidentified': 'Neidentifikovano', 'unidentified': 'Neidentificiran', 'uninhabitable = foundation and structure destroyed': 'nenaseljivo = temelji i struktura uništeni', 'Union Council': 'Vijeće saveza', 'Unique code': 'Jedinstveni kôd', 'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Jedinstveni identifikator kojim OVAJ repozitorij definiše sebe samog slanjem sinhronizacijskih zahtjeva.', 'Unique Locations': 'Jedinstvene lokacije', 'Unit': 'Jedinica', 'Unit added': 'Jedinica dodana', 'Unit Bed Capacity': 'Kapacitet kreveta po jedinici', 'Unit Cost': 'Troškovi jedinice', 'Unit deleted': 'Jedinica obrisana', 'Unit Details': 'Detalji jedinice', 'Unit of Measure': 'Jedinica mjere', 'Unit Set': 'Jedinica postavljena', 'Unit Short Code for e.g. 
m for meter.': 'Kratko ime jedinice, npr m za metar', 'Unit updated': 'Jedinica ažurirana', 'Unit Value': 'Vrijednost jedinice', 'United Arab Emirates': 'Ujedinjeni Arapski Emirati', 'United Kingdom': 'Ujedinjeno Kraljevstvo', 'United States Dollars': 'američki dolari', 'Units': 'Jedinice', 'Units of Measure': 'Mjerna jedinica', 'Unknown': 'Nepoznato', 'unknown': 'Nepoznato', 'Unknown Locations': 'Nepoznate lokacije', 'Unknown Peer': 'Nepoznati saradnik', 'Unknown question code': 'Nepoznata šifra pitanja', 'Unknown type of facility': 'Nepoznata vrsta objekta', 'unlimited': 'neograničeno', 'Unloading': 'Pražnjenje', 'Unmark as duplicate': 'Ukloni oznaku kao duplo', 'Unreinforced masonry': 'Zid bez armature', 'Unresolved Conflicts': 'Neriješeni konflikti', 'Unsafe': 'Nesiguran', 'Unselect to disable the modem': 'Uklonite oznaku da biste isključili modem', 'Unselect to disable this API service': 'Izbriši oznaku da onemogućiš ovu API uslugu', 'Unselect to disable this SMTP service': 'Poništite odabir da bi onemogućili ovu SMTP uslugu', 'Unsent': 'Nije poslano', 'Unskilled': 'Neiskusan', 'unspecified': 'nije navedeno', 'Unsubscribe': 'Otkaži pretplatu', 'Unsupported data format!': 'Nepodržan format podataka!', 'Unsupported method!': 'Nepodržana metoda!', 'unverified': 'nepotvrđeno', 'Update': 'Ažuriranje', 'update': 'ažuriraj', 'Update Activity Report': 'Ažuriraj izvještaj o aktivnostima', 'Update Base Location': 'Ažuriraj baznu lokaciju', 'Update Cholera Treatment Capability Information': 'Ažuriraj informacije o sposobnosti liječenja kolere', 'Update Coalition': 'Ažuriraj koaliciju', 'Update if Master': 'Ažuriraj ako je glavno', 'update if master': 'ažuriraj ako je glavno', 'update if newer': 'ažurirajte ako je novije', 'Update if Newer': 'Ažurirajte ako je novije', 'Update Import Job': 'Ažuriraj posao za uvoz', 'Update Location': 'Ažuriraj lokaciju', 'Update Map': 'Ažuriraj mapu', 'Update Master file': 'Ažuriraj glavnu datoteku', 'Update Method': 'Metod ažuriranja', 
'Update Morgue Details': 'Ažuriraj detalje mrtvačnice', 'Update Notification': 'Ažuriraj napomen u', 'Update Policy': 'Pravila ažuriranja', 'Update Report': 'Ažuriraj izvještaj', 'Update Request': 'Ažuriraj zahtjev', 'Update Service Profile': 'Ažuriraj profil usluge', 'Update Status': 'Ažuriraj status', 'Update Task Status': 'Ažuriraj status zadatka', 'Update this entry': 'Ažuriraj ovaj unos', 'Update Unit': 'Ažuriranje jedinice', 'Update your current ordered list': 'Ažuriraj trenutni uređeni spisak', 'Update/Newer': 'Ažuriraj/novije', 'Update:': 'Ažuriraj:', 'updated': 'ažurirano', 'Updated By': 'Ažurirano od', 'updates only': 'samo ažuriranja', 'Upload': 'Pošalji', 'Upload a (completely or partially) translated CSV file': 'Postavi (djelomično ili potpuno) prevedenu CSV datoteku', 'Upload a CSV file': 'Dodaj CVS datoteku', 'Upload a CSV file formatted according to the Template.': 'Učitaj fajl formata CSV prema šablonu.', 'Upload a Question List import file': 'Postavi uvoznu datoteku s listom pitanja', 'Upload a Spreadsheet': 'Slanje proračunskih tablica (spreadsheet)', 'Upload a text file containing new-line separated strings:': 'Postavi tekstualnu datoteku koja sadrži nizove znakova razdvojene novim redovima', 'Upload an Assessment Template import file': 'Postavi uvoznu datoteku za predložak pricjene', 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Učitajte sliku (bmp, gif, jpeg ili png), max. 300x300 !', 'Upload an image file (png or jpeg), max. 400x400 pixels!': 'Učitajte sliku (jpeg ili png), maks. 400x400 !', 'Upload an image file here.': 'stavite sliku ovdje', "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Ovdje učitajte slikovnu datoteku. 
Ako ne učitate slikovnu datoteku, onda morate specificirati njenu lokaciju u URL polju.', 'Upload an image, such as a photo': 'Podesi sliku,kao sto je fotografija', 'Upload Comma Separated Value File': 'Uploaduj datoteku vrijednosti odvojenih zarezom', 'Upload Completed Assessment Form': 'Postavi završen formular ocjene', 'Upload file': 'Postavi datoteku', 'Upload Format': 'Dodati format', 'Upload OCR Form': 'Pošalji OCR formu (optičko prepoznavanje karaktera)', 'Upload Photos': 'Učitaj fotografije', 'Upload Scanned OCR Form': 'Pošalji skenirani OCR formular', 'Upload Shapefile': 'Postavi datoteku s likovima', 'Upload Spreadsheet': 'Pošaljite tabelu proračuna', 'Upload the Completed Assessment Form': 'Postavi završen formular ocjene', 'Upload Track': 'Pošalji praćenje', 'Upload translated files': 'Pošalji prevedene datoteke', 'Upload Web2py portable build as a zip file': 'Pošalji Web2py portabilni sagrađen kao zip datoteka', 'Uploaded': 'Postavljeno', 'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'Postavljena datoteka nije PDF datoteka. Navedite formular u važećem PDF firmatu.', "Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": "Postavljena datoteka ili datoteke ne predstavljaju slike. Podržani formati slika su '.png', '.jpg', '.bmp', '.gif'.", 'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'Postavljena PDF datoteka ima više/manje brojeva stranica nego što je potrebno. 
Provjerite da li ste naveli pravu reviziju za vaš formular i provjerite da li formular sadrži potreban broj stranica.', 'Urban area': 'Urbano područje', 'Urban Fire': 'gradski požar', 'Urban Tank Tactical Vehicle': 'Gradska taktička pokretna cisterna', 'Urgent': 'Hitno', 'urgent': 'hitno', 'URL for the Mobile Commons API': 'URL za Mobile Commons API', 'URL for the twilio API.': 'URL za twilio API.', 'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.': 'URL podrazumijevanog Proxy servera za vezu s udaljenim repozitorijima (ako je potrebno). Ako samo neki repozitoriji zahtijevaju proxy server, možete to konfigurisatu.', 'URL of the proxy server to connect to the repository (leave empty for default proxy)': 'URL Proxy servera za vezu s repozitorijima (prazno za podrazumijevani proxy server)', 'URL of the Ushahidi instance': 'URL Ushahidi instance', 'URL to a Google Calendar to display on the project timeline.': 'URL za Google Calendar za prikaz projektne vremenske linije', 'URL to resume tour': 'URL da se nastavi tura', 'Uruguay': 'Urugvaj', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Koristite (...) & (...) za I, (...) | (...) za ILI, i ~ (...) 
za NE za izgradnju složenijih upita.', 'Use decimal': 'Koristi decimalno', 'Use default': 'Koristi podrazumjevano', 'Use default from feature class': 'Koristi podrazumijevano iz klase karakteristika', 'Use deg, min, sec': 'Koristi Stepeni Minute Sekunde', 'Use Geocoder for address lookups?': 'Koristi Geocoder za traženje adrese?', 'Use Site?': 'Da li koristiti mjesto?', 'Use these links to download data that is currently in the database.': 'Koristi ove linkove za skidanje podataka koji su trenutno u bazi.', 'Use this link to review the situation.': 'Koristite ovaj link za pregled situacije', 'Use this to set the starting location for the Location Selector.': 'Koristite ovo da podesite početnu lokaciju za Odabirač Lokacije', 'Used by IRS & Assess': 'Korišteno od strane IRS & Assess', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Koristi se u onHover Tooltip & iskočnim prozorima skupova pri razlikovanju tipova', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Korišteno za izgradnju onHover Tooltip i prvo polje je korišteno u iskočnim prozorima skupova za razlikovanje zapisa.', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Korisit se za provjeru razumnosti geografske širine unesene lokacije. Može se koristiti za filter liste resursa koji imaju klokacije', 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Koristi se za provjeru geografske dužine unesenih lokacija. 
Može se koristiti kao filter lista izvora koje posjeduju lokacije.', 'Used to import data from spreadsheets into the database': 'Korišteno da se unesu podaci iz Tabele u bazu podataka', 'Used to populate feature attributes which can be used for Styling.': 'Korišteno za punjenje atributa karakteristika korištenih za stilove.', 'Used within Inventory Management, Request Management and Asset Management': 'Korišteno za vođenje inventara, pri upravljanju zahtjevima i upravljanju sredstvima', 'User': 'Korisnik', 'User %(id)s Logged-in': 'Korisnik %(id)s prijavljen', 'User Account': 'Korisnički nalog', 'User Account has been Approved': 'Korisnički nalog je potvrđen', 'User Account has been Disabled': 'Korisnički račun je onemogućen', 'User added': 'Korisnik dodan', 'User added to Role': 'Korisnik dodan u ulogu', 'User already has this role': 'Korisnik već ima datu ulogu', 'User already in Group!': 'Korisnik već u grupi', 'User deleted': 'Korisnik obrisan', 'User Details': 'Korisnički detalji', 'User Guidelines Synchronization': 'Sinhronizacija korisničkih smjernica rada', 'User has been (re)linked to Person and Human Resource record': 'Korisnik je ponovo vezan za zapis o osoblju i ljudskim resursima.', 'User has no Email address!': 'Korisnik nema e-mail adrese!', 'User has no SMS address!': 'Korisnik nema SMS adrese!', 'User ID': 'Korisnički ID', 'User Management': 'Upravljanje korisnicima', 'User Profile': 'Korisnički profil', 'User Requests': 'Korisnički zahtjevi', 'User Roles': 'Uloge korisnika', 'User Updated': 'Ažuriran korisnik', 'User updated': 'Korisnik ažuriran', 'User with Role': 'Korisnik s ulogom', "User's role": 'Korisnička uloga', 'Username': 'Korisničko ime', 'Username & Password': 'Korisničko ime i lozinka', 'Username to use for authentication at the remote site.': 'Korisničko ime za prijavu na udaljeni sajt.', 'Users': 'Korisnici', 'Users in my Organizations': 'Korisnici u mojim organizacijama', 'Users removed': 'Korisnici uklonjeni', 'Users with this 
Role': 'Korisnici s ovom ulogom', 'Uses the REST Query Format defined in': 'Koristi REST format upita definiran u', 'Ushahidi': 'Ushahidi', 'Ushahidi Import': 'Uvoz iz Ushahidi', 'using default': 'koristim podrazumijevani', 'Usual food sources in the area': 'Ukobičajen izvor hrane u području', 'UTC Offset': 'UTC pomak', 'Utilities': 'Usluge', 'Utility, telecommunication, other non-transport infrastructure': 'Uslužne, telekomunikacijske i ostale netransportne infrastrukture', 'Utilization Details': 'Detalji upotrebe', 'Utilization Report': 'Izvještaj o upotrebi', 'UUID of foreign Sahana server': 'UUID udaljenog Sahana servera', 'Valid': 'važeće', 'Valid From': 'Važi od', 'Valid Until': 'Važi do', 'Value': 'Vrijednost', 'Value per Pack': 'Vrijednost po paketu', 'Various Reporting functionalities': 'Razne funkcionalnosti izvještaja', 'Vatican City': 'Vatikan', 'VCA (Vulnerability and Capacity Assessment)': 'VCA (Procjena ranjivosti i kapaciteta)', 'Vehicle': 'Vozilo', 'Vehicle added': 'Dodano vozilo', 'Vehicle assigned': 'Vozilo dodijeljeno', 'Vehicle Assignment updated': 'Dodjela osoblja ažurirana', 'Vehicle Assignments': 'Dodjele vozila', 'Vehicle Categories': 'Kategorije vozila', 'Vehicle Category': 'Kategorija vozila', 'Vehicle Crime': 'Zločin s vozilima', 'Vehicle deleted': 'Obrisano vozilo', 'Vehicle Details': 'Detalji o vozilu', 'Vehicle Details added': 'dodani detalji vozila', 'Vehicle Details deleted': 'obrisani detalji vozila', 'Vehicle Details updated': 'ažurirani detalji vozila', 'Vehicle Management': 'Vođenje vozila', 'Vehicle Plate Number': 'Registarski broj vozila', 'Vehicle Type': 'Vrsta vozila.', 'Vehicle Type added': 'Vrsta vozila dodana', 'Vehicle Type deleted': 'Vrsta vozila obrisana', 'Vehicle Type Details': 'Detalji o vrsti vozila', 'Vehicle Type updated': 'Vrsta vozila ažurirana', 'Vehicle Types': 'Vrste vozila', 'Vehicle unassigned': 'Vozilo nedodijeljeno', 'Vehicle updated': 'Ažurirano vozilo', 'Vehicles': 'Vozila', 'Vehicles are assets with 
some extra details.': 'Vozila su sredstva sa nekim dodatnim detaljima', 'Vendor': 'Proizvođač', 'Venezuela': 'Venecuela', 'Venue': 'Mjesto održavanja', 'Verification Status': 'Status provjere', 'verified': 'provjereno', 'Verified': 'Potvrđeno', 'Verified?': 'Potvrđeno?', 'Verify password': 'Potvrdite lozinku', 'Verify Password': 'Potvrdite lozinku', 'Version': 'Verzija', 'vertical': 'vertikalno', 'Very Good': 'Veoma dobro', 'Very High': 'Veoma visok', 'Very Strong': 'Veoma jako', 'Vessel Max Length': 'Max. dužina čamca', 'Victim': 'Žrtva', 'Video Tutorials': 'Video lekcije', 'Vietnam': 'Vijetnam', 'View': 'Pogled', 'view': 'pogled', 'View & Edit Pledges': 'Pregled/Uređivanje ponuda za pomoć', 'View Alerts received using either Email or SMS': 'Pregledaj upozorenja primljena korištenjem Email-a ili SMS-a', 'View All': 'Prikaži sve', 'View all log entries': 'Pogledaj sve unose u zapisniku', 'View All Tickets': 'Pogledaj sve kartice', "View and/or update details of the person's record": 'Prikažite i/ili ažurirajte detalje zapisa za ovu osobu', 'View and/or update their details': 'Prikažite i/ili ažurirajte njihove detalje', 'View as Pages': 'Pogledaj kao stranice', 'View Email Accounts': 'Pogledaj naloge elektronske pošte', 'View Email InBox': 'Pogledaj E-mail dolazne poruke', 'View Error Tickets': 'Pregledati kartice grešaka', 'View full screen': 'Pogledaj preko cijelog ekrana', 'View Fullscreen Map': 'Vidi mapu cijelog ekrana', 'View Image': 'Pogledaj sliku', 'View InBox': 'Pogledaj dolazne poruke', 'View Items': 'Prikaz stavki', 'View Location Details': 'Pogledaj detalje lokacije', 'View log entries per repository': 'Pogledaj stavke zapisnika po repozitoriju', 'View Message Log': 'Prikaži zapisnik poruka', 'View Mobile Commons Settings': 'Pogledaj mobilne postavke', 'View On Map': 'Pogledaj na mapi', 'View on Map': 'Pogledaj na Mapi', 'View or update the status of a hospital.': 'Pregledanje ili ažuriranje statusa bolnice.', 'View Outbox': 'Pogledaj izlazno sanduče', 
'View Parser Connections': 'Pogledaj parserske konekcije', 'View pending requests and pledge support.': 'Pregled zahtjeva na čekanju i ponuda podrške', 'View Picture': 'Pogledaj sliku', 'View Queries': 'Pogledaj upite', 'View Requests & Pledge Aid': 'Pogledaj zahtjeve i ponude za pomoć', 'View Requests for Aid': 'Pogledaj zahtjeve za pomoć', 'View Results of completed and/or partially completed assessments': 'Pogledaj rezultat završenih i/ili polovišno završenih procjena', 'View RSS Posts': 'Pogledaj RSS poruke', 'View RSS Settings': 'Pogledaj RSS Postavke', 'View Sender Priority': 'Pogledaj prioritet pošiljaoca', 'View Sent Emails': 'Pogledaj poslane E-mail poruke', 'View Sent SMS': 'Pogledaj poslane SMS poruke', 'View Sent Tweets': 'Pogledaj poslane Tweet poruke', 'View Settings': 'Prikaz postavki', 'View SMS InBox': 'Pogledaj SMS dolazne poruke', 'View SMS OutBox': 'Pogledaj SMS odlazne poruke', 'View Test Result Reports': 'Pogledaj izještaj o rezultatima testiranja', 'View the hospitals on a map.': 'Pogledaj bolnice na mapi.', 'View the module-wise percentage of translated strings': 'Pogledaj procenat prevedenosti stringova po modulu', 'View Tickets': 'Vidi kartice', 'View Translation Percentage': 'Pogledaj procenat prijevoda', 'View Tweet': 'Pogledaj tweet', 'View Twilio Settings': 'Pogledaj Twilio postavke', 'View Twitter InBox': 'Pogledaj Twittwe dolazne poruke', 'View/Edit Person Details': 'Pogledaj/uredi detalje osobe', 'View/Edit the Database directly': 'Pogledaj/Uredi Bazu podataka direktno', "View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'Pogledaj/uredi bazu podataka direktno (Upozorenje: nepoštivanje okvirnih pravila!)', 'Village': 'Selo', 'Village / Suburb': 'Selo / predgrađe', 'Village Leader': 'Vođa sela', 'Visible?': 'Vidljivo?', 'Visual Recognition': 'Vizuelno prepoznavanje', 'vm_action': 'vm_akcija', 'Volcanic Ash Cloud': 'Oblak vulkanskog pepela', 'Volcanic Event': 'Vulkanski događaj', 'Volume (m3)': 
'Zapremina (m3)', 'Volume - Fluids': 'Sadržaj - tečnosti', 'Volume - Solids': 'Sadržaj - čvrsti', 'Volume/Dimensions': 'Sadržaj/Dimenzije', 'Voluntarios': 'Volonterski', 'Volunteer': 'Volonter', 'volunteer': 'volonter', 'Volunteer added': 'Volonter dodan', 'Volunteer Availability': 'Dostupnost volontera', 'Volunteer availability added': 'Dodana dostupnost volontera', 'Volunteer availability deleted': 'Dostupnost volontera obrisana', 'Volunteer availability updated': 'Ažurirana dostupnost volontera', 'Volunteer Cluster': 'Skup volontera', 'Volunteer Cluster added': 'Skup volontera dodan', 'Volunteer Cluster deleted': 'Skup volontera obrisan', 'Volunteer Cluster Position': 'Pozicija skupa volontera', 'Volunteer Cluster Position added': 'Pozicija skupa volontera dodana', 'Volunteer Cluster Position deleted': 'Pozicija skupa volontera obrisana', 'Volunteer Cluster Position updated': 'Pozicija skupa volontera ažurirana', 'Volunteer Cluster Type': 'Tip skupa volontera', 'Volunteer Cluster Type added': 'Vrsta skupa volontera dodana', 'Volunteer Cluster Type deleted': 'Vrsta skupa volontera obrisana', 'Volunteer Cluster Type updated': 'Vrsta skupa volontera ažurirana', 'Volunteer Cluster updated': 'Skup volontera ažuriran', 'Volunteer Contact': 'Kontakt volontera', 'Volunteer Data': 'Podaci o volonterima', 'Volunteer deleted': 'Volonter obrisan', 'Volunteer Details': 'Detalji o volonteru', 'Volunteer details updated': 'Detalji o volonterima ažurirani', 'Volunteer Details updated': 'Volonter ažuriran', 'Volunteer Hours': 'Volonterski sati', 'Volunteer ID': 'ID volontera', 'Volunteer Information': 'Informacije Volontera', 'Volunteer Management': 'Koordinacija volontera', 'Volunteer Project': 'Volonterski projekat', 'Volunteer Record': 'Volonterski zapis', 'Volunteer Report': 'Izvještaj o volonterima', 'Volunteer Request': 'Volonterski zahtjev', 'Volunteer Role': 'Volonterska uloga', 'Volunteer Role added': 'Uloga volontera dodana', 'Volunteer Role Catalog': 'Katalog 
volonterskih uloga', 'Volunteer Role deleted': 'Uloga volontera obrisana', 'Volunteer Role Details': 'Detalji volonterske uloge', 'Volunteer Role updated': 'Uloga volontera ažurirana', 'Volunteer Service Record': 'Izvještaj o volonterskoj usluzi', 'volunteers': 'volonteri', 'Volunteers': 'Volonteri', 'Volunteers were notified!': 'Volonteri su obavješteni!', 'Vote': 'Glasati', 'Votes': 'Glasovi', 'Vulnerability Document': 'Dokument o ranjivosti', 'Walking Only': 'Samo hodanje', 'Walking time to the health service': 'Potrebno vrijeme hoda do zdravstvenih usluga', 'Wall or other structural damage': 'Zid ili druga strukturna oštećenja', 'Warehouse': 'Skladište', 'Warehouse added': 'Skladište dodano', 'Warehouse deleted': 'Skladište obrisano', 'Warehouse Details': 'Detalji o skladištu', 'Warehouse Item added': 'Dodata stavka skladišta', 'Warehouse Item deleted': 'Stavka skladišta obrisana', 'Warehouse Item Details': 'Detalji o stavci skladišta', 'Warehouse Items': 'Stavke skladišta', 'Warehouse Management': 'Upravljanje skladištem', 'Warehouse Stock': 'Zaliha u skladištu', 'Warehouse Stock Details': 'Detalji o zalihi skladišta', 'Warehouse Stock Report': 'Izvještaj zaliha u skladištu', 'Warehouse Stock updated': 'Ažurirana stavka skladišta', 'Warehouse updated': 'Skladište ažurirano', 'Warehouse/Sites Registry': 'Registar skladiša/mjesta', 'Warehouses': 'Skladišta', 'Warehousing Storage Capacity': 'Kapacitet skladišta', 'WARNING': 'UPOZORENJE', 'WASH': 'OPERI', 'Water': 'Voda', 'Water collection': 'Skupljanje vode', 'Water gallon': 'Kanister vode', 'Water Sanitation Hygiene': 'Higijena sanitacije vode', 'Water storage containers in households': 'Kontejneri za vodu u domaćinstvima', 'Water storage containers sufficient per HH': 'Kontejneri za vodu dovoljni za domaćinsto', 'Water Supply': 'Dostava vode', 'Water supply': 'Dostava vode', 'Waterspout': 'Vodena pijavica', 'WatSan': 'WatSan', 'wavy': 'valovito', 'Way Bill(s)': 'Putni troškovi', 'Waybill': 'Tovarni list', 
'WAYBILL': 'TOVARNILIST', 'Waybill Number': 'Broj tovarnog lista', "We have no active problem. That's great!": 'Nemamo aktivnog problema. Odlično!', 'We have tried': 'Pokušavali smo', 'Weak': 'Slabo', 'Web API settings updated': 'Web API postavke ažurirane', 'Web Form': 'Web formular', 'Web Map Service Browser Name': 'Ime usluge za pregled mape preko web-a', 'Web Map Service Browser URL': 'URL izbornika Web Map servisa', 'Web2py executable zip file found - Upload to replace the existing file': 'Web2py izvršna zip datoteka nađena - Pošaljite da zamijenite postojeću datoteku', 'Web2py executable zip file needs to be uploaded first to use this function.': 'Web2py izvršna zip datoteka treba da se prvo pošalje da bi ste koristili ovu funkcionalnost', 'Web2py executable zip file needs to be uploaded to use this function.': 'Web2py izvršna zip datoteka treba da se prvo pošalje da bi ste koristili ovu funkcionalnost', 'Website': 'Web stranica', 'Wednesday': 'Srijeda', 'Week': 'Sedmica', 'Weekends only': 'Samo vikendima', 'weekly': 'sedmično', 'Weekly': 'Sedmično', 'Weight': 'Težina', 'Weight (kg)': 'Težina (kg)', 'Welcome to the Sahana Eden Disaster Management System': 'Dobrodošli na Sahana Eden, sistem za upravljanje u slučaju katastrofa', 'Welcome to the Sahana FOSS Disaster Management System': 'Dobrodošli na Sahana FOSS, sistem za upravljanje u slučaju katastrofa', 'Welcome to the Sahana Portal at': 'Dobrodošli na Sahana Portal u', 'Well-Known Text': 'Dobro poznat tekst', 'Were breast milk substitutes used prior to the disaster?': 'Da li su korištene zamjene za majčino mlijeko prije katastrofe?', 'WFS Layer': 'WFS sloj', 'WGS84 (EPSG 4236) is required for many WMS servers.': 'WGS84 (EPSG 4236) je potreban za mnoge WMS servere', 'What are the factors affecting school attendance?': 'Koji faktori koji utiču na pohađanje škole', 'What are your main sources of cash to restart your business?': 'Koji su vaši glavni izvori novca za ponovni početak posla?', 'What are your main 
sources of income now?': 'Koji su vaši izvori primanja sada?', 'What do you spend most of your income on now?': 'Na šta sada trošite najveći dio prihoda?', 'What food stocks exist? (main dishes)': 'Koje zalihe hrane postoje (glavne namirnice)=', 'What food stocks exist? (side dishes)': 'Koje zalihe hrane postoje (dodatne namirnice)', 'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': 'Koji je glavni izvor čiste vode za svakodnevnu upotrebu (pranje, kuhanje, kupanje=', 'What is your major source of drinking water?': 'Koji je glavni izvor pitke vode?', 'What order to be contacted in.': 'Redoslijed kontaktiranja.', "What should be done to reduce women and children's vulnerability to violence?": 'Šta uraditi za smanjiti ranjivost žena i djece zbog nasilja?', 'What the Items will be used for': 'Za što će se koristiti ove stavke?', 'What type of latrines are available in the village/IDP centre/Camp?': 'Koji tip zahoda je dostupan u selu/centru/kampu?', 'What type of salvage material can be used from destroyed houses?': 'Koji tip spašenog materijala se može koristiti iz uništenih kuća', 'What type of salvage material can be used from destroyed schools?': 'Koji tip spašenog materijala se može koristiti iz uništenih škola', 'What types of health problems do children currently have?': 'Koju vrstu zdravstvenih problema djeca trenutno imaju?', 'What types of household water storage containers are available?': 'Koji tipovi kućnih spremnika za vodu su dostupni?', 'What were your main sources of income before the disaster?': 'Koji su bili vaši izvori primanja prije nepogode', 'Wheat': 'Žito', 'When reports were entered': 'Kada su izvještaji uneseni', "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. 
In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Kada se podaci usklađuju, dolazi do konflikta u slučaju kada dvije (ili više) stranaka želi da sinhronizira informacije koje su izmjenili, tj. protivne informacije. Sync modul pokušava riješiti ovakve konflikte ali ne uspjeva u nekim slučajevima. Tada je do Vas da riješite konflikte ručno, kliknite na link sa desne strane koji će Vas uputiti na tu stranicu.', 'When this search was last checked for changes.': 'Kada je ova pretraga zadnji put provjerena za promjene.', 'Where are the alternative places for studying?': 'Gdje su alternativna mjesta za studiranje', 'Where are the separated children originally from?': 'Koje je porijeklo odvojene djece?', 'Where do the majority of people defecate?': 'Gdje većina ljudi vrši nuždu?', 'Where have the children been sent?': 'Gdje su djeca poslana?', 'Where is solid waste disposed in the village/camp?': 'Gdje se smeće ostavlja u selu/kampu?', 'Where reached': 'Gdje je dosegnut', 'Whether calls to this resource should use this configuration as the default one': 'Da li bi pozivi na ovaj resurs trebali koristiti ovu konfiguraciju kao podrazumijevanu', 'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': 'Da li su geografska širina i dužina naslijeđeni iz višeg nivoa u hijerarhiji lokacija, umjesto da su posebno navedeni.', 'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'Da li se resurs treba pratiti koristeći S3Track umjesto da se samo koriste iz bazne lokacije', 'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Da li je ovo kopija Sahana Eden, Sahana Agasti, Ushahidi ili drugo', 'Which methods to apply when importing data to the local repository': 'Koje metode primijeniti pri uvozu podataka u lokalni repozitorij', 'Whiskers': 'Brkovi', 'white': 
'bijela', 'Whitelist a Sender': 'Stavi pošiljaoca na bijelu listu', 'Whitelisted Senders': 'Pošiljaoci na bijeloj listi', 'Who is doing what and where': 'Ko šta radi i gdje', 'Who is doing What Where': 'Ko šta radi i gdje', 'Who usually collects water for the family?': 'Ko obično u porodici sakuplja vodu?', 'wide': 'širok', 'wider area, longer term, usually contain multiple Activities': 'veće područje, na duže vrijeme, obično sadrži više aktivnosti', 'widowed': 'udovac/udovica', 'Width': 'širina', 'Width (m)': 'Širina (m)', 'Wikipedia': 'Wikipedia', 'Wild Fire': 'Požar', 'Will be filled automatically when the Item has been Repacked': 'Bit će popunjeno automatski pri prepakovanju stavke', 'Will create and link your user account to the following records': 'Kreiraće i povezati korisnički nalog s sljedećim zapisima', 'Wind Chill': 'Hladni Vjetar', 'window': 'prozor', 'Window frame': 'Okvir prozora', 'windows broken, cracks in walls, roof slightly damaged': 'prozori razbijeni, pukotine u zidovima, krov blago oštećen', 'Winter Storm': 'Zimska oluja', 'within human habitat': 'unutar prebivališta', 'WKT is Invalid!': 'WKT nije validan', 'WMS Browser URL': 'URL za WMS pretraživač', 'WMS Layer': 'WMS sloj', 'Women of Child Bearing Age': 'Žena u reproduktivnom dobu', 'Women participating in coping activities': 'Žene učesnici u aktivnostima prilagođavanja', 'Women who are Pregnant or in Labour': 'Trudnice i porodilje', 'Womens Focus Groups': 'Ženske fokus grupe', 'Wooden plank': 'drvena daska', 'Wooden poles': 'Drveni stubovi', 'Work': 'Posao', 'Work on Program': 'Rad na programu', 'Work phone': 'Telefon na poslu', 'Working hours end': 'Radna satnica završena', 'Working hours start': 'Početak radnog vremena', 'Working or other to provide money/food': 'Radi, ili nešto drugo, da bi osigurao novac/hranu', 'Would you like to display the photos on the map?': 'Želite li prikazati fotografije na mapi', 'X-Ray': 'X-zraci', 'xlwt module not available within the running Python - this 
needs installing for XLS output!': 'xlwt modul nije dostupan uz tekuću verziju Pythona - to se treba instalirati za XLS izlaz!', 'xlwt module not available within the running Python - this needs installing to do XLS Reporting!': 'Modul xlwt nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju XLS izvještaja!', 'xlwt not installed, so cannot export as a Spreadsheet': 'xlwt nije instaliran pa ne mogu izvesti kao tablicu', 'XSL Template Not Found:': 'XSL šablon nije pronađen:', 'XSL Transformation Error:': 'Greška u XSL transformaciji', 'XSLT Template Not Found:': 'XSLT šablon nije pronađen:', 'XSLT Transformation Error:': 'Greška u XSLT transformaciji', 'XYZ Layer': 'XYZ sloj', "Yahoo Layers cannot be displayed if there isn't a valid API Key": 'Yahoo slojevi ne mogu biti prikazani ako ne postoji ispravan API ključ', 'Year': 'Godina', 'Year built': 'Godina izgradnje', 'Year of Manufacture': 'Godina proizvodnje', 'Year that the organization was founded': 'Godina osnivanja organizacije', 'Yellow': 'Žuta', 'Yemen': 'Jemen', 'YES': 'DA', 'yes': 'da', 'Yes': 'Da', 'Yes, No': 'Da , ne', "Yes, No, Don't Know": 'Da, Ne, Ne znam', 'You are a recovery team?': 'Vi ste ekipa za sanaciju?', 'You are attempting to delete your own account - are you sure you want to proceed?': 'Pokušavate izbrisati svoj vlastiti račun - da li ste sigurni da želite da nastavite?', 'You are currently reported missing!': 'Vi ste trenutno prijavljeni kao nestali!', 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Možete promijeniti konfiguraciju modula sinhronizacije u sekciji Postavke. Ova konfiguracije uključuje vaš UUID (jedinstveni indentifikacijski broj), sinhronizaciju rasporeda, upravljački servis itd. 
Idite na sljedeći link da biste otišli na stranicu Postavke sinhronizacije.', 'You can click on the map below to select the Lat/Lon fields': 'Pritisnite na mapu ispod da selektirate Lat/Lon polja', 'You can only make %d kit(s) with the available stock': 'Možete napraviti %d komplet(a) s dostupnom zalihom', "You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets.": "Možete tražiti po broju sredstva, opisu stavke ili komentarima. Možete koristi % kao džoker. Pritisnite 'Traži' bez unosa za spisak svih sredstava.", "You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": "Možete tražiti po imenu grupe, opisu ili komentarima i po imenu organizacije ili akronimu. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svega.", "You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": "Možete tražiti po imenu kursa, mjestu održavanja ili komentarima događaja. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih događaja-", "You can search by description. You may use % as wildcard. Press 'Search' without input to list all incidents.": "Možete tražiti po opisu. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih incidenata.", "You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Možete tražiti po radnom mjestu ili ličnom imenu, unesite ime, prezime ili srednje ime razdvojeno razmacima. Možete koristiti % kao džoker. 
Pritisnite 'Traži' bez unosa za prikaz svih osoba.", 'You can search by name, acronym or comments': 'Možete tražiti po imenu, akronimu ili komentarima', 'You can search by name, acronym, comments or parent name or acronym.': 'Možete tražiti po imenu, akronimu, komentarima ili imenu/akronimu nadređenog zapisa.', "You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Možete tražiti po ličnom imenu, unesite ime, prezime ili srednje ime razdvojeno razmacima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih osoba.", "You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": "Možete tražiti po imenu kursiste, imenu kursa ili komentarima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih kursista.", 'You can select an area on the image and save to crop it.': 'Možete odabrati područje slike za njeno snimanje i izrezivanje.', 'You can select the Draw tool': 'Možete odabrati alat za crtanje', 'You can select the Draw tool (': 'Možete odabrati alat za crtanje (', 'You can set the modem settings for SMS here.': 'Možete postaviti postavke modema za SMS ovdje.', 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Možete koristiti sredstvo za konverziju kako bi pretvorili iz GPS koordinata ili iz Stepeni/Minute/Sekunde.', 'You do no have permission to cancel this received shipment.': 'Nemate dozvolu da otkažete primljenu isporuku.', 'You do no have permission to cancel this sent shipment.': 'Nemate dozvolu da otkažete ovu poslanu pošiljku.', 'You do no have permission to make this commitment.': 'Ne posjedujte dozvolu da se obavežete za takvo nešto', 'You do no have permission to receive this shipment.': 'Nemate dozvolu da primite ovu pošiljku.', 'You do no have permission to send 
this shipment.': 'Nemate dozvolu da pošaljete ovu pošiljku', 'You do not have permission for any facility to add an order.': 'Nemate odobrenja ni za jedan objekat da dodate narudžbu.', 'You do not have permission for any facility to make a commitment.': 'Nemate dozvolu za angažovanje za neki objekt.', 'You do not have permission for any facility to make a request.': 'Nemate dozvolu za podnošenje zahtjeva bilo kojem objektu.', 'You do not have permission for any facility to perform this action.': 'Nemate odobrenja ni za jedan objekat da obavite ovu akciju.', 'You do not have permission for any facility to receive a shipment.': 'Nemate dozvolu ni za jedan objekat da primite pošiljku.', 'You do not have permission for any facility to send a shipment.': 'Nemate dozvolu da pošaljete pošiljku bilo kojem objektu.', 'You do not have permission for any organization to perform this action.': 'Nemate odobrenja ni za jednu organizaciju da obavite ovu akciju', 'You do not have permission for any site to add an inventory item.': 'Nemate dozvolu da dodate stavku inventara ni na jednom mjestu', 'You do not have permission for any site to receive a shipment.': 'Nemate dozvolu da ijedna lokacija dobije pošiljku', 'You do not have permission for any site to send a shipment.': 'Nemate dozvolu za bilo koju stranicu za slanje pošiljke', 'You do not have permission to adjust the stock level in this warehouse.': 'Nemate odobrenja da prilagodite nivo zalihe za ovo skladište', 'You do not have permission to cancel this received shipment.': 'Nemate dozvolu da otkažete primljenu isporuku.', 'You do not have permission to cancel this sent shipment.': 'Nemate dozvolu da otkažete ovu poslanu pošiljku.', 'You do not have permission to make this commitment.': 'Ne posjedujte dozvolu da napravite ovo zaduženje', 'You do not have permission to receive this shipment.': 'Nemate dozvolu da primite ovu pošiljku.', 'You do not have permission to return this sent shipment.': 'Nemate dozvolu da vratite ovu 
poslanu pošiljku.', 'You do not have permission to send a shipment from this site.': 'Nemate dozvolu da šaljete pošiljku sa ovog mjesta.', 'You do not have permission to send messages': 'Nemate dozvolu da pošaljete poruke', 'You do not have permission to send this shipment.': 'Nemate dozvolu da pošaljete ovu pošiljku', 'You have a personal map configuration. To change your personal configuration, click': 'Imate ličnu konfiguraciju mape. Ukoliko želite promjeniti ličnu konfiguraciju, kliknite', 'You have a personal map configuration. To change your personal configuration, click ': 'Imate ličnu konfiguraciju mape. Za promjenu, kliknite ', 'You have committed for all people in this Request. Please check that all details are correct and update as-required.': 'Zadužili ste za sve ljude u ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have committed to all items in this Request. Please check that all details are correct and update as-required.': 'Zadužili ste sve ljude u ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have committed to this Request. Please check that all details are correct and update as-required.': 'Napravili ste zaduženje po ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have found a dead body?': 'Pronašli ste mrtvo tijelo?', "You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click": 'Imate lične postavke, pa promjene ovdje načinjene vam neće biti vidljive. Ukoliko želite promjeniti ličnu konfiguraciju, kliknite', "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "Postoje promjene koje nisu spašene. Pritisnite 'Odustani', zatim pritisnite 'Snimi' da biste ih sačuvali. Pritisnite OK da biste ih odbacili.", 'You have unsaved changes. 
You need to press the Save button to save them': 'Imate nesnimljenih promjena. Možete kliknuti dugme za snimanje da ih snimite', "You haven't made any calculations": 'Niste napravili nikakve proračune', 'You must agree to the Terms of Service': 'Morate se složiti s uslovima upotrebe', 'You must be logged in to register volunteers.': 'Morate biti prijavljeni da registrujete volontere.', 'You must be logged in to report persons missing or found.': 'Morate biti ulogovani da biste prijavili nestanak ili pronalazak osobe.', 'You must enter a minimum of %d characters': 'Morate unijeti najmanje %d znakova', 'You must enter a minimum of 4 characters': 'Morate unijeti najmanje 4 znaka', 'You must provide a series id to proceed.': 'Morate obezbijediti ID serije da nastavite.', 'You need to check all item quantities and allocate to bins before you can receive the shipment': 'Morate provjeriti sve količine stavki i dodijeliti ih u korpe prije prijema pošiljke', 'You need to check all item quantities before you can complete the return process': 'Morate provjeriti sve količine stavki prije završetka procesa vraćanja', 'You need to create a template before you can create a series': 'Trebate kreirati predložak prije nego možete kreirati seriju', 'You need to have at least 2 records in this list in order to merge them.': 'Trebate imati bar 2 zapisa u ovoj listi da ih možet spojiti.', 'You need to use the spreadsheet which you can download from this page': 'Trebate koristiti tablicu koju možete preuzeti s ove stranice', 'You should edit Twitter settings in models/000_config.py': 'Trebali biste izmijeniti postavke na Twitteru u models/000_config.py', 'Your action is required. Please approve user': 'Vaša akcija je potrebna, molim potvrdite korisnika', 'Your action is required. Please approve user %s asap:': 'Potrebna je vaša akcija. Potvrdite korisnika %s što prije moguće:', 'Your current ordered list ... (#TODO [String])': 'Vaša trenutna lista narudžbi ... 
(#TODO [String])', 'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Vaša trenutno naručena lista riješenih predmeta je prikazana ispod. Možete je promijeniti tako što ćete opet glasati.', 'Your name for this search. Notifications will use this name.': 'Vaše ime za ovu pretragu. Napomene će koristiti ovo ime.', 'Your post was added successfully.': 'Vaša poruka je uspješno dodata.', 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Vašem sistemu je dodijeljen jedinstveni identifikacijski broj (UUID), kojeg ostali računari oko vas mogu koristiti da vas identifikuju. Da biste vidjeli svoj UUID, možete otići na Sinhronizacija-> Postavke sinhronizacije. Na toj stranici možete vidjeti i ostala podešavanja.', 'Zambia': 'Zambija', 'Zero Hour': 'Početni trenutak', 'Zeroconf Description': 'Opis bez potrebe za konfiguracijom', 'Zimbabwe': 'Zimbabve', 'Zinc roof': 'Krov od cinka', 'ZIP Code': 'Poštanski broj', 'ZIP/Postcode': 'Poštanski broj', 'Zone': 'Zona', 'Zoom': 'Uvećaj', 'Zoom In': 'Uvećaj', 'Zoom in closer to Edit OpenStreetMap layer': 'Približi za uređivanjeEdit OpenStreetMap sloja', 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'Uvećanje: kliknite unutar mape ili koristite lijevu tipku miša da napravite pravougaonik', 'Zoom Levels': 'Nivoi zumiranja', 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'Umanjenje: kliknite unutar mape ili koristite lijevu tipku miša da napravite pravougaonik', 'Zoom to Current Location': 'Uvećaj na trenutnu lokaciju', 'Zoom to maximum map extent': 'Uvećaj na maksimalnu veličinui mape', }
ahaym/eden
languages/bs.py
Python
mit
538,016
[ "VisIt" ]
c50f146723cf9bb3e21d9780b36ea338f7bcd4bb002e938c9a8835897dd1b04e
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Support for using Psi4 as an MDI engine.

For details regarding MDI, see https://molssi.github.io/MDI_Library/html/index.html.
"""
import psi4
import numpy as np
import qcelemental as qcel

# The MDI library is an optional dependency; _have_mdi records whether it
# could be imported so callers can check availability before use.
_have_mdi = False
try:
    from mdi import MDI_Init, MDI_Get_Intra_Code_MPI_Comm, MDI_Accept_Communicator, \
        MDI_Send, MDI_Recv, MDI_Recv_Command, MDI_INT, MDI_DOUBLE, \
        MDI_Register_Node, MDI_Register_Command
    _have_mdi = True
except ImportError:
    pass

# mpi4py is likewise optional; without it the engine runs serially.
try:
    from mpi4py import MPI
    use_mpi4py = True
except ImportError:
    use_mpi4py = False


class MDIEngine():
    """Run Psi4 as an MDI engine: accept a driver connection and service
    MDI commands (geometry exchange, energy/gradient requests, lattice
    point charges, etc.) against a single working molecule."""

    def __init__(self, scf_method, **kwargs):
        """ Initialize an MDIEngine object for communication with MDI

        Arguments:
            scf_method: Method used when calculating energies or gradients
            kwargs: Additional arguments forwarded to psi4.energy/gradient;
                may include 'molecule' to select the working molecule
                (defaults to the active molecule).
        """

        # Method used when the SCF command is received
        self.scf_method = scf_method

        # Additional arguments for energy, gradient, or optimization calculations
        self.kwargs = kwargs

        # Molecule all MDI operations are performed on.
        # 'molecule' is popped so it is not passed on to psi4.energy/gradient;
        # a clone is used so MDI-driven edits do not mutate the caller's molecule.
        input_molecule = kwargs.pop('molecule', psi4.core.get_active_molecule())
        self.molecule = input_molecule.clone()
        psi4.core.set_active_molecule(self.molecule)

        # Most recent SCF energy
        self.energy = 0.0

        # Variables used when MDI sets a lattice of point charges
        self.nlattice = 0  # number of lattice point charges
        self.clattice = []  # list of lattice coordinates
        self.lattice = []  # list of lattice charges
        self.lattice_field = psi4.QMMM()  # Psi4 chargefield

        # MPI variables
        self.mpi_world = None
        self.world_rank = 0

        # Flag for if a lattice of point charges has been set
        self.set_lattice = False

        # Get correct intra-code MPI communicator
        if use_mpi4py:
            self.mpi_world = MDI_Get_Intra_Code_MPI_Comm()
            self.world_rank = self.mpi_world.Get_rank()

            # Psi4 does not currently support multiple MPI ranks
            if self.mpi_world.Get_size() != 1:
                MPI.COMM_WORLD.Abort()

        # Accept a communicator to the driver code
        self.comm = MDI_Accept_Communicator()

        # Ensure that the molecule is using c1 symmetry.
        # MDI drivers manipulate raw Cartesian coordinates, so symmetry,
        # reorientation, and COM shifts must all be disabled.
        self.molecule.reset_point_group('c1')
        self.molecule.fix_orientation(True)
        self.molecule.fix_com(True)
        self.molecule.reinterpret_coordentry(False)
        self.molecule.update_geometry()

        # Flag to stop listening for MDI commands
        self.stop_listening = False

        # Dictionary of all supported MDI commands
        self.commands = {
            "<NATOMS": self.send_natoms,
            "<COORDS": self.send_coords,
            "<CHARGES": self.send_charges,
            "<ELEMENTS": self.send_elements,
            "<MASSES": self.send_masses,
            "<ENERGY": self.send_energy,
            "<FORCES": self.send_forces,
            ">COORDS": self.recv_coords,
            ">NLATTICE": self.recv_nlattice,
            ">CLATTICE": self.recv_clattice,
            ">LATTICE": self.recv_lattice,
            ">MASSES": self.recv_masses,
            "SCF": self.run_scf,
            "<DIMENSIONS": self.send_dimensions,
            "<TOTCHARGE": self.send_total_charge,
            ">TOTCHARGE": self.recv_total_charge,
            "<ELEC_MULT": self.send_multiplicity,
            ">ELEC_MULT": self.recv_multiplicity,
            "EXIT": self.exit
        }

        # Register all the supported commands
        MDI_Register_Node("@DEFAULT")
        for command in self.commands.keys():
            MDI_Register_Command("@DEFAULT", command)

    def length_conversion(self):
        """ Obtain the conversion factor between the geometry specification units and bohr

        :returns: *unit_conv* Conversion factor between the geometry specification units and bohr
        :raises Exception: if the molecule's unit name is neither Angstrom nor Bohr
        """
        unit_name = self.molecule.units()
        if unit_name == "Angstrom":
            unit_conv = qcel.constants.bohr2angstroms
        elif unit_name == "Bohr":
            unit_conv = 1.0
        else:
            raise Exception('Unrecognized unit type: ' + str(unit_name))
        return unit_conv

    # Respond to the <NATOMS command
    def send_natoms(self):
        """ Send the number of atoms through MDI

        :returns: *natom* Number of atoms
        """
        natom = self.molecule.natom()
        MDI_Send(natom, 1, MDI_INT, self.comm)
        return natom

    # Respond to the <COORDS command
    def send_coords(self):
        """ Send the nuclear coordinates through MDI

        :returns: *coords* Flattened (3*natom) array of nuclear coordinates
        """
        coords = self.molecule.geometry().np.ravel()
        MDI_Send(coords, len(coords), MDI_DOUBLE, self.comm)
        return coords

    # Respond to the <CHARGES command
    def send_charges(self):
        """ Send the nuclear charges through MDI

        :returns: *charges* Atomic charges
        """
        natom = self.molecule.natom()
        charges = [self.molecule.charge(iatom) for iatom in range(natom)]
        MDI_Send(charges, natom, MDI_DOUBLE, self.comm)
        return charges

    # Respond to the <MASSES command
    def send_masses(self):
        """ Send the nuclear masses through MDI

        :returns: *masses* Atomic masses
        """
        natom = self.molecule.natom()
        molecule_dict = self.molecule.to_dict()
        masses = molecule_dict['mass']
        MDI_Send(masses, natom, MDI_DOUBLE, self.comm)
        return masses

    # Respond to the <ELEMENTS command
    def send_elements(self):
        """ Send the atomic number of each nucleus through MDI

        :returns: *elements* Element of each atom
        """
        natom = self.molecule.natom()
        elements = [self.molecule.true_atomic_number(iatom) for iatom in range(natom)]
        MDI_Send(elements, natom, MDI_INT, self.comm)
        return elements

    # Respond to the <ENERGY command
    def send_energy(self):
        """ Send the total energy through MDI

        Runs an SCF calculation first so the reported energy matches the
        current geometry and lattice.

        :returns: *energy* Energy of the system
        """
        self.run_scf()
        MDI_Send(self.energy, 1, MDI_DOUBLE, self.comm)
        return self.energy

    # Respond to the <FORCES command
    def send_forces(self):
        """ Send the nuclear forces through MDI

        :returns: *forces* Flattened gradient of the system
        """
        force_matrix = psi4.driver.gradient(self.scf_method, **self.kwargs)
        forces = force_matrix.np.ravel()
        MDI_Send(forces, len(forces), MDI_DOUBLE, self.comm)
        return forces

    # Respond to the >CHARGES command
    def recv_charges(self, charges=None):
        """ Receive a set of nuclear charges through MDI and assign them to the atoms in the current molecule

        Arguments:
            charges: New nuclear charges. If None, receive through MDI.
        """
        natom = self.molecule.natom()
        if charges is None:
            charges = MDI_Recv(natom, MDI_DOUBLE, self.comm)

        # Assign the charge of all atoms, taking care to avoid ghost atoms.
        # jatom walks the full (real + ghost) atom list; iatom walks real atoms.
        jatom = 0
        for iatom in range(natom):
            # Bounds check FIRST so fZ is never called with an out-of-range index.
            while jatom < self.molecule.nallatom() and self.molecule.fZ(jatom) == 0:
                jatom = jatom + 1
            if jatom >= self.molecule.nallatom():
                raise Exception('Unexpected number of ghost atoms when receiving charges')
            # NOTE(review): MDI delivers natom entries (real atoms only) but the
            # value is looked up with the all-atom index jatom — with ghost atoms
            # present this can read past the end; confirm intent against upstream.
            self.molecule.set_nuclear_charge(iatom, charges[jatom])
            jatom = jatom + 1

    # Respond to the >COORDS command
    def recv_coords(self, coords=None):
        """ Receive a set of nuclear coordinates through MDI and assign them to the atoms in the current molecule

        Arguments:
            coords: New nuclear coordinates. If None, receive through MDI.
        """
        natom = self.molecule.natom()
        if coords is None:
            coords = MDI_Recv(3 * natom, MDI_DOUBLE, self.comm)
        matrix = psi4.core.Matrix.from_array(np.array(coords).reshape(-1, 3))
        self.molecule.set_geometry(matrix)

    # Respond to the >MASSES command
    def recv_masses(self, masses=None):
        """ Receive a set of nuclear masses through MDI and assign them to the atoms in the current molecule

        Arguments:
            masses: New nuclear masses. If None, receive through MDI.
        """
        natom = self.molecule.natom()
        if masses is None:
            masses = MDI_Recv(natom, MDI_DOUBLE, self.comm)

        # Assign the mass of all atoms, taking care to avoid ghost atoms.
        # Same real-atom/all-atom index walk as recv_charges.
        jatom = 0
        for iatom in range(natom):
            # Bounds check FIRST so fZ is never called with an out-of-range index.
            while jatom < self.molecule.nallatom() and self.molecule.fZ(jatom) == 0:
                jatom = jatom + 1
            if jatom >= self.molecule.nallatom():
                raise Exception('Unexpected number of ghost atoms when receiving masses')
            # NOTE(review): masses[jatom] indexes natom received values by the
            # all-atom index — see matching note in recv_charges.
            self.molecule.set_mass(iatom, masses[jatom])
            jatom = jatom + 1

    # Set a lattice of point charges
    def set_lattice_field(self):
        """ Set a field of lattice point charges using information received through MDI

        Rebuilds the QMMM charge field from self.nlattice / self.clattice /
        self.lattice and installs it as the global EXTERN potential.
        """
        self.lattice_field = psi4.QMMM()
        unit_conv = self.length_conversion()
        for ilat in range(self.nlattice):
            latx = self.clattice[3 * ilat + 0] * unit_conv
            laty = self.clattice[3 * ilat + 1] * unit_conv
            latz = self.clattice[3 * ilat + 2] * unit_conv
            self.lattice_field.extern.addCharge(self.lattice[ilat], latx, laty, latz)
        psi4.core.set_global_option_python('EXTERN', self.lattice_field.extern)
        self.set_lattice = True

    # Respond to the >NLATTICE command
    def recv_nlattice(self, nlattice=None):
        """ Receive the number of lattice point charges through MDI

        Resets the lattice coordinate and charge arrays to zeros of the new
        size and rebuilds the external field.

        Arguments:
            nlattice: New number of point charges. If None, receive through MDI.
        """
        if nlattice is None:
            self.nlattice = MDI_Recv(1, MDI_INT, self.comm)
        else:
            self.nlattice = nlattice
        self.clattice = [0.0 for ilat in range(3 * self.nlattice)]
        self.lattice = [0.0 for ilat in range(self.nlattice)]
        self.set_lattice_field()

    # Respond to the >CLATTICE command
    def recv_clattice(self, clattice=None):
        """ Receive the coordinates of a set of lattice point charges through MDI

        Arguments:
            clattice: New coordinates of the lattice of point charges. If None, receive through MDI.
        """
        if clattice is None:
            self.clattice = MDI_Recv(3 * self.nlattice, MDI_DOUBLE, self.comm)
        else:
            self.clattice = clattice
        self.set_lattice_field()

    # Respond to the >LATTICE command
    def recv_lattice(self, lattice=None):
        """ Receive the charges of a set of lattice point charges through MDI

        Arguments:
            lattice: New charges of the lattice of point charges. If None, receive through MDI.
        """
        if lattice is None:
            self.lattice = MDI_Recv(self.nlattice, MDI_DOUBLE, self.comm)
        else:
            self.lattice = lattice
        self.set_lattice_field()

    # Respond to the SCF command
    def run_scf(self):
        """ Run an energy calculation and cache the result in self.energy """
        self.energy = psi4.energy(self.scf_method, **self.kwargs)

    # Respond to the <DIMENSIONS command
    def send_dimensions(self):
        """ Send the dimensionality of the system through MDI

        :returns: *dimensions* Dimensionality of the system (always [1, 1, 1]: non-periodic)
        """
        dimensions = [1, 1, 1]
        MDI_Send(dimensions, 3, MDI_INT, self.comm)
        return dimensions

    # Respond to the <TOTCHARGE command
    def send_total_charge(self):
        """ Send the total system charge through MDI

        :returns: *charge* Total charge of the system
        """
        charge = self.molecule.molecular_charge()
        MDI_Send(charge, 1, MDI_DOUBLE, self.comm)
        return charge

    # Respond to the >TOTCHARGE command
    def recv_total_charge(self, charge=None):
        """ Receive the total system charge through MDI

        The value arrives as a double per the MDI Standard and is rounded to
        the nearest integer before being applied.

        Arguments:
            charge: New charge of the system. If None, receive through MDI.
        """
        if charge is None:
            charge = MDI_Recv(1, MDI_DOUBLE, self.comm)
        self.molecule.set_molecular_charge(int(round(charge)))

    # Respond to the <ELEC_MULT command
    def send_multiplicity(self):
        """ Send the electronic multiplicity through MDI

        :returns: *multiplicity* Multiplicity of the system
        """
        multiplicity = self.molecule.multiplicity()
        MDI_Send(multiplicity, 1, MDI_INT, self.comm)
        return multiplicity

    # Respond to the >ELEC_MULT command
    def recv_multiplicity(self, multiplicity=None):
        """ Receive the electronic multiplicity through MDI

        Arguments:
            multiplicity: New multiplicity of the system. If None, receive through MDI.
        """
        if multiplicity is None:
            multiplicity = MDI_Recv(1, MDI_INT, self.comm)
        self.molecule.set_multiplicity(multiplicity)

    # Respond to the EXIT command
    def exit(self):
        """ Stop listening for MDI commands and clear any lattice field """
        self.stop_listening = True

        # If a lattice of point charges was set, unset it now
        if self.set_lattice:
            psi4.core.set_global_option_python('EXTERN', None)

    # Enter server mode, listening for commands from the driver
    def listen_for_commands(self):
        """ Receive commands through MDI and respond to them as defined by the MDI Standard

        Only rank 0 talks to the driver; the command is broadcast to any
        other intra-code ranks before dispatch.

        :raises Exception: on an unrecognized command name
        """
        while not self.stop_listening:
            if self.world_rank == 0:
                command = MDI_Recv_Command(self.comm)
            else:
                command = None
            if use_mpi4py:
                command = self.mpi_world.bcast(command, root=0)
            if self.world_rank == 0:
                psi4.core.print_out('\nMDI command received: ' + str(command) + ' \n')

            # Dispatch directly from the command table (keys are unique, so a
            # dict lookup replaces the original linear scan).
            handler = self.commands.get(command)
            if handler is None:
                raise Exception('Unrecognized command: ' + str(command))
            handler()


def mdi_init(mdi_arguments):
    """ Initialize the MDI Library

    Arguments:
        mdi_arguments: MDI configuration options
    """
    mpi_world = None
    if use_mpi4py:
        mpi_world = MPI.COMM_WORLD
    MDI_Init(mdi_arguments, mpi_world)


def mdi_run(scf_method, **kwargs):
    """ Begin functioning as an MDI engine

    Blocks, servicing driver commands until EXIT is received.

    Arguments:
        scf_method: Method used when calculating energies or gradients
        kwargs: Additional arguments forwarded to the MDIEngine constructor
    """
    engine = MDIEngine(scf_method, **kwargs)
    engine.listen_for_commands()
jgonthier/psi4
psi4/driver/mdi_engine.py
Python
lgpl-3.0
15,842
[ "Psi4" ]
ca6fef0e627a0a5a65cd204fde3f279607a66b1fac05017ecde6a8f0c04092fd
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import mock import grpc from grpc.experimental import aio from collections.abc import Iterable import json import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from requests import Response from requests import Request, PreparedRequest from requests.sessions import Session from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.compute_v1.services.disk_types import DiskTypesClient from google.cloud.compute_v1.services.disk_types import pagers from google.cloud.compute_v1.services.disk_types import transports from google.cloud.compute_v1.types import compute from google.oauth2 import service_account import google.auth def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert DiskTypesClient._get_default_mtls_endpoint(None) is None assert DiskTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert ( DiskTypesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( DiskTypesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( DiskTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert DiskTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize("client_class,transport_name", [(DiskTypesClient, "rest"),]) def test_disk_types_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ( "compute.googleapis.com{}".format(":443") if transport_name in ["grpc", "grpc_asyncio"] else "https://{}".format("compute.googleapis.com") ) @pytest.mark.parametrize( "transport_class,transport_name", [(transports.DiskTypesRestTransport, "rest"),] ) def test_disk_types_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = 
service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize("client_class,transport_name", [(DiskTypesClient, "rest"),]) def test_disk_types_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ( "compute.googleapis.com{}".format(":443") if transport_name in ["grpc", "grpc_asyncio"] else "https://{}".format("compute.googleapis.com") ) def test_disk_types_client_get_transport_class(): transport = DiskTypesClient.get_transport_class() available_transports = [ transports.DiskTypesRestTransport, ] assert transport in available_transports transport = DiskTypesClient.get_transport_class("rest") assert transport == transports.DiskTypesRestTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [(DiskTypesClient, transports.DiskTypesRestTransport, "rest"),], ) @mock.patch.object( DiskTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DiskTypesClient) ) def test_disk_types_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we 
won't create a new one. with mock.patch.object(DiskTypesClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(DiskTypesClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ (DiskTypesClient, transports.DiskTypesRestTransport, "rest", "true"), (DiskTypesClient, transports.DiskTypesRestTransport, "rest", "false"), ], ) @mock.patch.object( DiskTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DiskTypesClient) ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_disk_types_client_mtls_env_auto( 
client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class", [DiskTypesClient]) @mock.patch.object( DiskTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DiskTypesClient) ) def test_disk_types_client_get_mtls_endpoint_and_cert_source(client_class): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source == mock_client_cert_source # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): mock_client_cert_source = mock.Mock() mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): ( api_endpoint, cert_source, ) = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @pytest.mark.parametrize( "client_class,transport_class,transport_name", [(DiskTypesClient, transports.DiskTypesRestTransport, "rest"),], ) def test_disk_types_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [(DiskTypesClient, transports.DiskTypesRestTransport, "rest", None),], ) def test_disk_types_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. 
options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "request_type", [compute.AggregatedListDiskTypesRequest, dict,] ) def test_aggregated_list_rest(request_type): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding request_init = {"project": "sample1"} request = request_type(request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. return_value = compute.DiskTypeAggregatedList( id="id_value", kind="kind_value", next_page_token="next_page_token_value", self_link="self_link_value", unreachables=["unreachables_value"], ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskTypeAggregatedList.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.aggregated_list(request) # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.AggregatedListPager) assert response.id == "id_value" assert response.kind == "kind_value" assert response.next_page_token == "next_page_token_value" assert response.self_link == "self_link_value" assert response.unreachables == ["unreachables_value"] def test_aggregated_list_rest_required_fields( request_type=compute.AggregatedListDiskTypesRequest, ): transport_class = transports.DiskTypesRestTransport request_init = {} request_init["project"] = "" request = request_type(request_init) jsonified_request = json.loads( request_type.to_json( request, including_default_value_fields=False, use_integers_for_enums=False ) ) # verify fields with default values are dropped unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).aggregated_list._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present jsonified_request["project"] = "project_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).aggregated_list._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set( ( "filter", "include_all_scopes", "max_results", "order_by", "page_token", "return_partial_success", ) ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "project" in jsonified_request assert jsonified_request["project"] == "project_value" client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) request = request_type(request_init) # Designate an appropriate value for the returned response. return_value = compute.DiskTypeAggregatedList() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values # for required fields will fail the real version if the http_options # expect actual values for those fields. with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. transcode_result = { "uri": "v1/sample_method", "method": "get", "query_params": request_init, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskTypeAggregatedList.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.aggregated_list(request) expected_params = [] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params def test_aggregated_list_rest_unset_required_fields(): transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials ) unset_fields = transport.aggregated_list._get_unset_required_fields({}) assert set(unset_fields) == ( set( ( "filter", "includeAllScopes", "maxResults", "orderBy", "pageToken", "returnPartialSuccess", ) ) & set(("project",)) ) @pytest.mark.parametrize("null_interceptor", [True, False]) def test_aggregated_list_rest_interceptors(null_interceptor): transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.DiskTypesRestInterceptor(), ) client = DiskTypesClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( transports.DiskTypesRestInterceptor, "post_aggregated_list" ) as post, mock.patch.object( transports.DiskTypesRestInterceptor, "pre_aggregated_list" ) as pre: pre.assert_not_called() 
post.assert_not_called() transcode.return_value = { "method": "post", "uri": "my_uri", "body": None, "query_params": {}, } req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() req.return_value._content = compute.DiskTypeAggregatedList.to_json( compute.DiskTypeAggregatedList() ) request = compute.AggregatedListDiskTypesRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata post.return_value = compute.DiskTypeAggregatedList client.aggregated_list( request, metadata=[("key", "val"), ("cephalopod", "squid"),] ) pre.assert_called_once() post.assert_called_once() def test_aggregated_list_rest_bad_request( transport: str = "rest", request_type=compute.AggregatedListDiskTypesRequest ): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # send a request that will satisfy transcoding request_init = {"project": "sample1"} request = request_type(request_init) # Mock the http request call within the method and fake a BadRequest error. with mock.patch.object(Session, "request") as req, pytest.raises( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 400 response_value.request = Request() req.return_value = response_value client.aggregated_list(request) def test_aggregated_list_rest_flattened(): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
return_value = compute.DiskTypeAggregatedList() # get arguments that satisfy an http rule for this method sample_request = {"project": "sample1"} # get truthy value for each flattened field mock_args = dict(project="project_value",) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskTypeAggregatedList.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value client.aggregated_list(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( "%s/compute/v1/projects/{project}/aggregated/diskTypes" % client.transport._host, args[1], ) def test_aggregated_list_rest_flattened_error(transport: str = "rest"): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.aggregated_list( compute.AggregatedListDiskTypesRequest(), project="project_value", ) def test_aggregated_list_rest_pager(transport: str = "rest"): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # TODO(kbandes): remove this mock unless there's a good reason for it. 
# with mock.patch.object(path_template, 'transcode') as transcode: # Set the response as a series of pages response = ( compute.DiskTypeAggregatedList( items={ "a": compute.DiskTypesScopedList(), "b": compute.DiskTypesScopedList(), "c": compute.DiskTypesScopedList(), }, next_page_token="abc", ), compute.DiskTypeAggregatedList(items={}, next_page_token="def",), compute.DiskTypeAggregatedList( items={"g": compute.DiskTypesScopedList(),}, next_page_token="ghi", ), compute.DiskTypeAggregatedList( items={ "h": compute.DiskTypesScopedList(), "i": compute.DiskTypesScopedList(), }, ), ) # Two responses for two calls response = response + response # Wrap the values into proper Response objs response = tuple(compute.DiskTypeAggregatedList.to_json(x) for x in response) return_values = tuple(Response() for i in response) for return_val, response_val in zip(return_values, response): return_val._content = response_val.encode("UTF-8") return_val.status_code = 200 req.side_effect = return_values sample_request = {"project": "sample1"} pager = client.aggregated_list(request=sample_request) assert isinstance(pager.get("a"), compute.DiskTypesScopedList) assert pager.get("h") is None results = list(pager) assert len(results) == 6 assert all(isinstance(i, tuple) for i in results) for result in results: assert isinstance(result, tuple) assert tuple(type(t) for t in result) == (str, compute.DiskTypesScopedList) assert pager.get("a") is None assert isinstance(pager.get("h"), compute.DiskTypesScopedList) pages = list(client.aggregated_list(request=sample_request).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize("request_type", [compute.GetDiskTypeRequest, dict,]) def test_get_rest(request_type): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding request_init = {"project": "sample1", "zone": "sample2", 
"disk_type": "sample3"} request = request_type(request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. return_value = compute.DiskType( creation_timestamp="creation_timestamp_value", default_disk_size_gb=2097, description="description_value", id=205, kind="kind_value", name="name_value", region="region_value", self_link="self_link_value", valid_disk_size="valid_disk_size_value", zone="zone_value", ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskType.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.get(request) # Establish that the response is the type that we expect. assert isinstance(response, compute.DiskType) assert response.creation_timestamp == "creation_timestamp_value" assert response.default_disk_size_gb == 2097 assert response.description == "description_value" assert response.id == 205 assert response.kind == "kind_value" assert response.name == "name_value" assert response.region == "region_value" assert response.self_link == "self_link_value" assert response.valid_disk_size == "valid_disk_size_value" assert response.zone == "zone_value" def test_get_rest_required_fields(request_type=compute.GetDiskTypeRequest): transport_class = transports.DiskTypesRestTransport request_init = {} request_init["disk_type"] = "" request_init["project"] = "" request_init["zone"] = "" request = request_type(request_init) jsonified_request = json.loads( request_type.to_json( request, including_default_value_fields=False, use_integers_for_enums=False ) ) # verify fields with default values are dropped unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).get._get_unset_required_fields(jsonified_request) 
jsonified_request.update(unset_fields) # verify required fields with default values are now present jsonified_request["diskType"] = "disk_type_value" jsonified_request["project"] = "project_value" jsonified_request["zone"] = "zone_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).get._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "diskType" in jsonified_request assert jsonified_request["diskType"] == "disk_type_value" assert "project" in jsonified_request assert jsonified_request["project"] == "project_value" assert "zone" in jsonified_request assert jsonified_request["zone"] == "zone_value" client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) request = request_type(request_init) # Designate an appropriate value for the returned response. return_value = compute.DiskType() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values # for required fields will fail the real version if the http_options # expect actual values for those fields. with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. 
transcode_result = { "uri": "v1/sample_method", "method": "get", "query_params": request_init, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskType.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.get(request) expected_params = [] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params def test_get_rest_unset_required_fields(): transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials ) unset_fields = transport.get._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("diskType", "project", "zone",))) @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_rest_interceptors(null_interceptor): transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.DiskTypesRestInterceptor(), ) client = DiskTypesClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( transports.DiskTypesRestInterceptor, "post_get" ) as post, mock.patch.object( transports.DiskTypesRestInterceptor, "pre_get" ) as pre: pre.assert_not_called() post.assert_not_called() transcode.return_value = { "method": "post", "uri": "my_uri", "body": None, "query_params": {}, } req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() req.return_value._content = compute.DiskType.to_json(compute.DiskType()) request = compute.GetDiskTypeRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata post.return_value = compute.DiskType client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) pre.assert_called_once() 
post.assert_called_once() def test_get_rest_bad_request( transport: str = "rest", request_type=compute.GetDiskTypeRequest ): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # send a request that will satisfy transcoding request_init = {"project": "sample1", "zone": "sample2", "disk_type": "sample3"} request = request_type(request_init) # Mock the http request call within the method and fake a BadRequest error. with mock.patch.object(Session, "request") as req, pytest.raises( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 400 response_value.request = Request() req.return_value = response_value client.get(request) def test_get_rest_flattened(): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. return_value = compute.DiskType() # get arguments that satisfy an http rule for this method sample_request = { "project": "sample1", "zone": "sample2", "disk_type": "sample3", } # get truthy value for each flattened field mock_args = dict( project="project_value", zone="zone_value", disk_type="disk_type_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskType.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value client.get(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( "%s/compute/v1/projects/{project}/zones/{zone}/diskTypes/{disk_type}" % client.transport._host, args[1], ) def test_get_rest_flattened_error(transport: str = "rest"): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get( compute.GetDiskTypeRequest(), project="project_value", zone="zone_value", disk_type="disk_type_value", ) def test_get_rest_error(): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @pytest.mark.parametrize("request_type", [compute.ListDiskTypesRequest, dict,]) def test_list_rest(request_type): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding request_init = {"project": "sample1", "zone": "sample2"} request = request_type(request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. return_value = compute.DiskTypeList( id="id_value", kind="kind_value", next_page_token="next_page_token_value", self_link="self_link_value", ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskTypeList.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.list(request) # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.ListPager) assert response.id == "id_value" assert response.kind == "kind_value" assert response.next_page_token == "next_page_token_value" assert response.self_link == "self_link_value" def test_list_rest_required_fields(request_type=compute.ListDiskTypesRequest): transport_class = transports.DiskTypesRestTransport request_init = {} request_init["project"] = "" request_init["zone"] = "" request = request_type(request_init) jsonified_request = json.loads( request_type.to_json( request, including_default_value_fields=False, use_integers_for_enums=False ) ) # verify fields with default values are dropped unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).list._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present jsonified_request["project"] = "project_value" jsonified_request["zone"] = "zone_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).list._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set( ("filter", "max_results", "order_by", "page_token", "return_partial_success",) ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "project" in jsonified_request assert jsonified_request["project"] == "project_value" assert "zone" in jsonified_request assert jsonified_request["zone"] == "zone_value" client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) request = request_type(request_init) # Designate an appropriate value for the returned response. return_value = compute.DiskTypeList() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values # for required fields will fail the real version if the http_options # expect actual values for those fields. with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. transcode_result = { "uri": "v1/sample_method", "method": "get", "query_params": request_init, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskTypeList.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.list(request) expected_params = [] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params def test_list_rest_unset_required_fields(): transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials ) unset_fields = transport.list._get_unset_required_fields({}) assert set(unset_fields) == ( set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",)) & set(("project", "zone",)) ) @pytest.mark.parametrize("null_interceptor", [True, False]) def test_list_rest_interceptors(null_interceptor): transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.DiskTypesRestInterceptor(), ) client = DiskTypesClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( transports.DiskTypesRestInterceptor, "post_list" ) as post, mock.patch.object( transports.DiskTypesRestInterceptor, "pre_list" ) as pre: pre.assert_not_called() post.assert_not_called() transcode.return_value = { "method": "post", "uri": "my_uri", "body": None, 
"query_params": {}, } req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() req.return_value._content = compute.DiskTypeList.to_json(compute.DiskTypeList()) request = compute.ListDiskTypesRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata post.return_value = compute.DiskTypeList client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) pre.assert_called_once() post.assert_called_once() def test_list_rest_bad_request( transport: str = "rest", request_type=compute.ListDiskTypesRequest ): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # send a request that will satisfy transcoding request_init = {"project": "sample1", "zone": "sample2"} request = request_type(request_init) # Mock the http request call within the method and fake a BadRequest error. with mock.patch.object(Session, "request") as req, pytest.raises( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 400 response_value.request = Request() req.return_value = response_value client.list(request) def test_list_rest_flattened(): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
return_value = compute.DiskTypeList() # get arguments that satisfy an http rule for this method sample_request = {"project": "sample1", "zone": "sample2"} # get truthy value for each flattened field mock_args = dict(project="project_value", zone="zone_value",) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 json_return_value = compute.DiskTypeList.to_json(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value client.list(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( "%s/compute/v1/projects/{project}/zones/{zone}/diskTypes" % client.transport._host, args[1], ) def test_list_rest_flattened_error(transport: str = "rest"): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list( compute.ListDiskTypesRequest(), project="project_value", zone="zone_value", ) def test_list_rest_pager(transport: str = "rest"): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # TODO(kbandes): remove this mock unless there's a good reason for it. 
# with mock.patch.object(path_template, 'transcode') as transcode: # Set the response as a series of pages response = ( compute.DiskTypeList( items=[compute.DiskType(), compute.DiskType(), compute.DiskType(),], next_page_token="abc", ), compute.DiskTypeList(items=[], next_page_token="def",), compute.DiskTypeList(items=[compute.DiskType(),], next_page_token="ghi",), compute.DiskTypeList(items=[compute.DiskType(), compute.DiskType(),],), ) # Two responses for two calls response = response + response # Wrap the values into proper Response objs response = tuple(compute.DiskTypeList.to_json(x) for x in response) return_values = tuple(Response() for i in response) for return_val, response_val in zip(return_values, response): return_val._content = response_val.encode("UTF-8") return_val.status_code = 200 req.side_effect = return_values sample_request = {"project": "sample1", "zone": "sample2"} pager = client.list(request=sample_request) results = list(pager) assert len(results) == 6 assert all(isinstance(i, compute.DiskType) for i in results) pages = list(client.list(request=sample_request).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = DiskTypesClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide an api_key and a transport instance. 
transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials(), ) options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = DiskTypesClient(client_options=options, transport=transport,) # It is an error to provide an api_key and a credential. options = mock.Mock() options.api_key = "api_key" with pytest.raises(ValueError): client = DiskTypesClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) # It is an error to provide scopes and a transport instance. transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = DiskTypesClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.DiskTypesRestTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = DiskTypesClient(transport=transport) assert client.transport is transport @pytest.mark.parametrize("transport_class", [transports.DiskTypesRestTransport,]) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_disk_types_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.DiskTypesTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_disk_types_base_transport(): # Instantiate the base transport. 
with mock.patch( "google.cloud.compute_v1.services.disk_types.transports.DiskTypesTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.DiskTypesTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. methods = ( "aggregated_list", "get", "list", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() def test_disk_types_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.compute_v1.services.disk_types.transports.DiskTypesTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.DiskTypesTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=( "https://www.googleapis.com/auth/compute.readonly", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/cloud-platform", ), quota_project_id="octopus", ) def test_disk_types_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.compute_v1.services.disk_types.transports.DiskTypesTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.DiskTypesTransport() adc.assert_called_once() def test_disk_types_auth_adc(): # If no credentials are provided, we should use ADC credentials. 
with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) DiskTypesClient() adc.assert_called_once_with( scopes=None, default_scopes=( "https://www.googleapis.com/auth/compute.readonly", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/cloud-platform", ), quota_project_id=None, ) def test_disk_types_http_transport_client_cert_source_for_mtls(): cred = ga_credentials.AnonymousCredentials() with mock.patch( "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" ) as mock_configure_mtls_channel: transports.DiskTypesRestTransport( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback ) mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) @pytest.mark.parametrize("transport_name", ["rest",]) def test_disk_types_host_no_port(transport_name): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="compute.googleapis.com" ), transport=transport_name, ) assert client.transport._host == ( "compute.googleapis.com:443" if transport_name in ["grpc", "grpc_asyncio"] else "https://compute.googleapis.com" ) @pytest.mark.parametrize("transport_name", ["rest",]) def test_disk_types_host_with_port(transport_name): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="compute.googleapis.com:8000" ), transport=transport_name, ) assert client.transport._host == ( "compute.googleapis.com:8000" if transport_name in ["grpc", "grpc_asyncio"] else "https://compute.googleapis.com:8000" ) def test_common_billing_account_path(): billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = DiskTypesClient.common_billing_account_path(billing_account) assert expected == actual def 
test_parse_common_billing_account_path(): expected = { "billing_account": "clam", } path = DiskTypesClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = DiskTypesClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = DiskTypesClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "octopus", } path = DiskTypesClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = DiskTypesClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = DiskTypesClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nudibranch", } path = DiskTypesClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = DiskTypesClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = DiskTypesClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "mussel", } path = DiskTypesClient.common_project_path(**expected) # Check that the path construction is reversible. 
actual = DiskTypesClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "winkle" location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = DiskTypesClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "scallop", "location": "abalone", } path = DiskTypesClient.common_location_path(**expected) # Check that the path construction is reversible. actual = DiskTypesClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.DiskTypesTransport, "_prep_wrapped_messages" ) as prep: client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.DiskTypesTransport, "_prep_wrapped_messages" ) as prep: transport_class = DiskTypesClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) def test_transport_close(): transports = { "rest": "_session", } for transport, close_name in transports.items(): client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "rest", ] for transport in transports: client = DiskTypesClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. 
with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called() @pytest.mark.parametrize( "client_class,transport_class", [(DiskTypesClient, transports.DiskTypesRestTransport),], ) def test_api_key_credentials(client_class, transport_class): with mock.patch.object( google.auth._default, "get_api_key_credentials", create=True ) as get_api_key_credentials: mock_cred = mock.Mock() get_api_key_credentials.return_value = mock_cred options = client_options.ClientOptions() options.api_key = "api_key" with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, )
googleapis/python-compute
tests/unit/gapic/compute_v1/test_disk_types.py
Python
apache-2.0
66,139
[ "Octopus" ]
449f8f03eec2004f932159d71b2e0ea243aa1badd3bc637497acdd02226e52d1
""" Composite actions over managers shared between HTTP endpoint (routes.py) and message queue. """ from pulsar.client.setup_handler import build_job_config from pulsar.managers import status from pulsar.managers import PULSAR_UNKNOWN_RETURN_CODE from galaxy.tools.deps import dependencies import os def status_dict(manager, job_id): job_status = manager.get_status(job_id) return full_status(manager, job_status, job_id) def full_status(manager, job_status, job_id): if job_status in [status.COMPLETE, status.CANCELLED]: full_status = __job_complete_dict(job_status, manager, job_id) else: full_status = {"complete": "false", "status": job_status, "job_id": job_id} return full_status def __job_complete_dict(complete_status, manager, job_id): """ Build final dictionary describing completed job for consumption by Pulsar client. """ return_code = manager.return_code(job_id) if return_code == PULSAR_UNKNOWN_RETURN_CODE: return_code = None stdout_contents = manager.stdout_contents(job_id).decode("utf-8") stderr_contents = manager.stderr_contents(job_id).decode("utf-8") job_directory = manager.job_directory(job_id) as_dict = dict( job_id=job_id, complete="true", # Is this still used or is it legacy. status=complete_status, returncode=return_code, stdout=stdout_contents, stderr=stderr_contents, working_directory=job_directory.working_directory(), working_directory_contents=job_directory.working_directory_contents(), outputs_directory_contents=job_directory.outputs_directory_contents(), system_properties=manager.system_properties(), ) return as_dict def submit_job(manager, job_config): """ Launch new job from specified config. May have been previously 'setup' if 'setup_params' in job_config is empty. """ # job_config is raw dictionary from JSON (from MQ or HTTP endpoint). 
job_id = job_config.get('job_id') command_line = job_config.get('command_line') setup_params = job_config.get('setup_params', {}) force_setup = job_config.get('setup') remote_staging = job_config.get('remote_staging', {}) dependencies_description = job_config.get('dependencies_description', None) env = job_config.get('env', []) submit_params = job_config.get('submit_params', {}) job_config = None if setup_params or force_setup: input_job_id = setup_params.get("job_id", job_id) tool_id = setup_params.get("tool_id", None) tool_version = setup_params.get("tool_version", None) job_config = setup_job(manager, input_job_id, tool_id, tool_version) if job_config is not None: job_directory = job_config["job_directory"] jobs_directory = os.path.abspath(os.path.join(job_directory, os.pardir)) command_line = command_line.replace('__PULSAR_JOBS_DIRECTORY__', jobs_directory) # TODO: Handle __PULSAR_JOB_DIRECTORY__ config files, metadata files, etc... manager.handle_remote_staging(job_id, remote_staging) dependencies_description = dependencies.DependenciesDescription.from_dict(dependencies_description) return manager.launch( job_id, command_line, submit_params, dependencies_description=dependencies_description, env=env ) def setup_job(manager, job_id, tool_id, tool_version): """ Setup new job from these inputs and return dict summarizing state (used to configure command line). """ job_id = manager.setup_job(job_id, tool_id, tool_version) return build_job_config( job_id=job_id, job_directory=manager.job_directory(job_id), system_properties=manager.system_properties(), tool_id=tool_id, tool_version=tool_version, )
ssorgatem/pulsar
pulsar/manager_endpoint_util.py
Python
apache-2.0
3,837
[ "Galaxy" ]
7c8e1307d503d2486242bfdd7fee4b08623fe99424d62beccfcd72d058ab349b
# Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ ************************************ **FixedPairListAdress** - Object ************************************ The FixedPairListAdress is the Fixed Pair List to be used for AdResS or H-AdResS simulations. When creating the FixedPairListAdress one has to provide the storage and the tuples. Afterwards the bonds can be added. In the example "bonds" is a python list of the form ( (pid1, pid2), (pid3, pid4), ...) where each inner pair defines a bond between the particles with the given particle ids. Example - creating the FixedPairListAdress and adding bonds: >>> ftpl = espresso.FixedTupleList(system.storage) >>> fpl = espresso.FixedPairListAdress(system.storage, ftpl) >>> fpl.addBonds(bonds) """ from espresso import pmi import _espresso import espresso from espresso.esutil import cxxinit class FixedPairListAdressLocal(_espresso.FixedPairListAdress): 'The (local) fixed pair list.' 
def __init__(self, storage, fixedtupleList): 'Local construction of a fixed pair list' if pmi.workerIsActive(): cxxinit(self, _espresso.FixedPairListAdress, storage, fixedtupleList) def add(self, pid1, pid2): 'add pair to fixed pair list' if pmi.workerIsActive(): return self.cxxclass.add(self, pid1, pid2) def getBonds(self): 'return the bonds of the GlobalPairList' if pmi.workerIsActive(): bonds=self.cxxclass.getBonds(self) return bonds def addBonds(self, bondlist): """ Each processor takes the broadcasted bondlist and adds those pairs whose first particle is owned by this processor. """ if pmi.workerIsActive(): for bond in bondlist: pid1, pid2 = bond self.cxxclass.add(self, pid1, pid2) if pmi.isController: class FixedPairListAdress(object): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espresso.FixedPairListAdressLocal', localcall = [ "add" ], pmicall = [ "addBonds" ], pmiinvoke = ['getBonds'] )
BackupTheBerlios/espressopp
src/FixedPairListAdress.py
Python
gpl-3.0
2,974
[ "ESPResSo" ]
9c831d18e3c9224004919686509564744f311db7b2d052a6cd6f62573a769521
""" Open-source code by @andersenchen and @sbchou at Knewton. August 2013 Contains class KalmanFilter, an implementation of the hybrid Kalman filter algorithm), and a Simulator class for testing. """ import itertools import math import random import numpy as np class KalmanFilter: """ A class for statistical inference of timeseries data using the Kalman Filter algorithm. The system we solve is a set of latent scalars X = {x_1, x_2,...,x_k} given scalar observations Z = {z_1, z_2,...,z_k} occuring at times {t_1, t_2,...,t_k}, where our latent state is evolved as a Gaussian random walk: x_k = x_{k_1} + sqrt(t_k - t_{k-1}) * w_k with w_k ~ N(0, q) and t_k - t_{k-1} being the time difference between our current and previous observations. And an observation of our latent state is made according to: z_k = x_k + v_k with v_k ~ N(0, r) """ def __init__(self, x0=0, var0=0.001, q=0.01, r=0.1): """ Initialize starting values. (x0, var0) represent starting mean and variance estimates of the filter. q is the variance of the latent process, and r is the variance of the observation noise. 
Parameters: x0: Scalar that represents the starting mean estimate of the filter var0: Positive scalar that represents the starting variance estimate of the filter q: Positive scalar representing the latent process variance r: Positive scalar representing the observation noise variance """ assert var0 > 0, "starting variance should be a positive scalar" assert q > 0, "latent state variance should be a positive scalar" assert r > 0, "observation noise variance should be positive scalar" self.x_curr = x0 self.var_curr = var0 self.q = q self.r = r self.initialize_estimates() def initialize_estimates(self): """ Start with empty list of means and variances """ self.means = [] self.variances = [] def update(self, observation, timeDifference): """ Update our posterior to account for a new (observation, timeDifference)) """ assert timeDifference > 0, "timeDifference should be a positive scalar" x_predict = self.x_curr #prediction step var_predict = self.var_curr + self.q * math.sqrt(timeDifference) kalman_gain = var_predict / (var_predict + self.r) self.x_curr = x_predict + kalman_gain * (observation - x_predict) self.var_curr = kalman_gain * self.r self.means.append(self.x_curr) self.variances.append(self.var_curr) return self.x_curr, self.var_curr def filter_observations(self, observations, timeDifferences): """ Run the filter on multiple observations. The length of observations should equal the length of timeDifferences. """ assert len(observations) == len(timeDifferences) for obs, t in itertools.izip(observations, timeDifferences): self.update(obs, t) return self.means, self.variances class Simulator: """ A class for generating simulation data and running it through the Kalman filter. """ def __init__(self): pass def test_constant(self, var_z, n=1000): """ We have a constant latent state at zero having observation noise with variance var_z. Observation times are assumed to be equally spaced. We simulate data according to this model and do parameter recovery. 
Parameters: var_z: Positive scalar representing the variance of the observation noise """ assert var_z > 0, "var_z should be a positive scalar" observations = [random.gauss(0, var_z) for i in xrange(n)] timeDifferences = [1] * n kalman = KalmanFilter() kalman.filter_observations(observations, timeDifferences) self.visualize(kalman, [0] * n, observations, timeDifferences) def generate_noisy_gaussian_walk(self, var_x, var_z, max_time, n=1000): """ Generate simulated data with n continuous random gaussian observations and uniform random time differences, where var_x is the variance of the latent process governing x_k, and var_z is the variance of the observation noise, and n is the number of observations made. We draw time differences ~ Unif(0, max_time) where max_time is the maximum time difference. Parameters: var_x: Positive scalar representing the variance of the latentprocess var_z: Positive scalar representing the variance of the observation noise max_time: Positive scalar representing the maximum possible time difference between two observations n: Positive integer representing the number of observations to be generated """ assert var_x > 0, "var_x should be a positive scalar" assert var_z > 0, "var_z should be a positive scalar" assert max_time > 0, "max_time should be a positive scalar" assert n > 0 and int(n) == n, "n should be a positive integer" observations = [] truth = [] timeDifferences = [random.random() * max_time for i in xrange(n)] latent = 0 for i in xrange(n): latent += random.gauss(0, var_x) * math.sqrt(timeDifferences[i]) truth.append(latent) observations.append(latent + random.gauss(0, var_z)) return truth, observations, timeDifferences def test_noisy_gaussian_walk(self, test_var_latent=0.01, test_var_observed=0.1, test_max_time_diff=5.): """ Test recovery of latent parameters generated by noisy gaussian walk (continuous random time differences) using the Kalman Filter. 
Parameters: test_var_latent: Positive scalar representing the variance of thelatent process we generate test_var_observed: Positive scalar representing the variance of the observation noise we generate test_max_time_diff: """ assert test_var_latent > 0, "test_var_latent should be a positive scalar" assert test_var_observed > 0, "test_var_observed should be a positive scalar" assert test_max_time_diff > 0, "test_max_time_diff should be a positive scalar" truth, observations, timeDifferences = \ self.generate_noisy_gaussian_walk(test_var_latent, test_var_observed, test_max_time_diff) kalman = KalmanFilter() kalman.filter_observations(observations, timeDifferences) self.visualize(kalman, truth, observations, timeDifferences) def visualize(self, kalman, truth, observations, timeDifferences): """ Plot recovered and original latent variables along with observations using matplotlib """ from matplotlib import pyplot as plt plt.xlabel("Time") plt.ylabel("Latent state") plt.title("Latent state vs. time") p1, = plt.plot(np.cumsum(timeDifferences), truth) p2, = plt.plot(np.cumsum(timeDifferences), kalman.means) xLength = len(kalman.means) p3 = plt.scatter(np.cumsum(timeDifferences), observations, alpha = 0.2) bottom = [kalman.means[i] - 2 * math.sqrt(kalman.variances[i]) for i in xrange(xLength)] top = [kalman.means[i] + 2 * math.sqrt(kalman.variances[i]) for i in xrange(xLength)] plt.fill_between(np.cumsum(timeDifferences), bottom, top, color="green", alpha=0.2) p = plt.Rectangle((0,0), 1, 1, fc="g", alpha=0.2) plt.legend((p1, p2, p3, p), ("Truth", "Posterior means", "Observations", "95 Percent Confidence Interval")) plt.show() if __name__ == '__main__': s = Simulator() s.test_noisy_gaussian_walk()
Knewton/Kalman
open_source_kalman.py
Python
mit
8,086
[ "Gaussian" ]
a1cf5cdeee154d2c32cb673a21cfcade37a4a6264cd59cca55ce3c779893287d
# -*- coding: utf-8 -*- """ Deal with DER encoding and decoding. Adapted from python-ecdsa at https://github.com/warner/python-ecdsa Copyright (c) 2010 Brian Warner Portions written in 2005 by Peter Pearson and placed in the public domain. The MIT License (MIT) Copyright (c) 2013 by Richard Kiss Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import binascii from pycoin.intbytes import int2byte class UnexpectedDER(Exception): pass def encode_integer(r): assert r >= 0 # can't support negative numbers yet h = "%x" % r if len(h) % 2: h = "0" + h s = binascii.unhexlify(h.encode("utf8")) if ord(s[:1]) <= 0x7f: return b"\x02" + int2byte(len(s)) + s else: # DER integers are two's complement, so if the first byte is # 0x80-0xff then we need an extra 0x00 byte to prevent it from # looking negative. 
return b"\x02" + int2byte(len(s)+1) + b"\x00" + s def encode_sequence(*encoded_pieces): total_len = sum([len(p) for p in encoded_pieces]) return b"\x30" + encode_length(total_len) + b"".join(encoded_pieces) def remove_sequence(string): if not string.startswith(b"\x30"): raise UnexpectedDER( "wanted sequence (0x30), got string length %d %s" % ( len(string), binascii.hexlify(string[:10]))) length, lengthlength = read_length(string[1:]) endseq = 1+lengthlength+length return string[1+lengthlength:endseq], string[endseq:] def remove_integer(string, use_broken_open_ssl_mechanism=False): # OpenSSL treats DER-encoded negative integers (that have their most significant # bit set) as positive integers. Some apps depend upon this bug. if not string.startswith(b"\x02"): raise UnexpectedDER("did not get expected integer 0x02") length, llen = read_length(string[1:]) if len(string) < 1+llen+length: raise UnexpectedDER("ran out of integer bytes") numberbytes = string[1+llen:1+llen+length] rest = string[1+llen+length:] v = int(binascii.hexlify(numberbytes), 16) if ord(numberbytes[:1]) >= 0x80: if not use_broken_open_ssl_mechanism: v -= (1 << (8 * length)) return v, rest def encode_length(l): assert l >= 0 if l < 0x80: return int2byte(l) s = "%x" % l if len(s) % 2: s = "0"+s s = binascii.unhexlify(s) llen = len(s) return int2byte(0x80 | llen) + s def read_length(string): s0 = ord(string[:1]) if not (s0 & 0x80): # short form return (s0 & 0x7f), 1 # else long-form: b0&0x7f is number of additional base256 length bytes, # big-endian llen = s0 & 0x7f if llen > len(string)-1: raise UnexpectedDER("ran out of length bytes") return int(binascii.hexlify(string[1:1+llen]), 16), 1+llen def sigencode_der(r, s): return encode_sequence(encode_integer(r), encode_integer(s)) def sigdecode_der(sig_der, use_broken_open_ssl_mechanism=True): # if use_broken_open_ssl_mechanism is true, this is a non-standard implementation rs_strings, empty = remove_sequence(sig_der) r, rest = remove_integer(rs_strings, 
use_broken_open_ssl_mechanism=use_broken_open_ssl_mechanism) s, empty = remove_integer(rest, use_broken_open_ssl_mechanism=use_broken_open_ssl_mechanism) return r, s
zsulocal/pycoin
pycoin/tx/script/der.py
Python
mit
4,302
[ "Brian" ]
c4da12a03ef4656b837f9bd52e8b48a2375c4c482bb05a91e23bbd981f5ecb91
# Copyright 2010 http://www.collabq.com import logging import re from django.conf import settings from common import api from common import clean from common import exception from common import patterns from common import user from common import util from common.protocol import base from common.protocol import sms HELP_HUH = "Sorry, did not understand \"%s\". Send HELP for commands" HELP_WELCOME = "Welcome to %s SMS! Questions? Contact support@%s" % (util.get_metadata('SITE_NAME'), settings.HOSTED_DOMAIN) HELP_WELCOME_NICK = "Welcome to %s SMS %s! Questions? Contact support@%s" % (util.get_metadata('SITE_NAME'), '%s', settings.HOSTED_DOMAIN) HELP_DOUBLE_OPT_IN = "To confirm you'd like to receive SMS updates, reply YES. You'll only have to do this once." HELP_SIGNED_OUT = "You have signed out." HELP_CHARGES = "%s is free. Other charges may apply." % (util.get_metadata('SITE_NAME')) HELP_HELP_1 = "%s SMS updates. To get alerts text FOLLOW user/channelname. To stop text LEAVE user/channelname. To stop all alerts text STOP. To resume text START" % (util.get_metadata('SITE_NAME')) HELP_HELP_2 = "Complete list on %s/sms. Other charges may apply. Questions? 
Contact support@%s" % (settings.DOMAIN, settings.HOSTED_DOMAIN) HELP_NOT_SIGNED_IN = "You are currently signed out\n" HELP_SIGNED_IN_AS = "You are signed in as '%s'\n" HELP_FOLLOW_ONLY = "You are signed in as a follow-only user\n" HELP_PASSWORD = "Your password is: %s\n" \ "Use it to sign in on the web at http://%s/\n" % ('%s', settings.DOMAIN) HELP_POST = "To post to your stream, just send a message" HELP_CHANNEL_POST = "To post to a channel, start your message with " \ "#channel" HELP_COMMENT = "To comment on the latest update from someone, start " \ "with @user" HELP_FOLLOW = "To follow a user or channel, send FOLLOW <user/#channel>" HELP_FOLLOW_NEW = "Send FOLLOW <user/#channel> to just follow a user or " \ "channel without signing up" HELP_LEAVE = "To stop following a user or channel, send LEAVE <user/#channel>" HELP_STOP = "To stop all alerts, send OFF" HELP_START = "To turn on alerts, send ON" HELP_SIGN_OUT = "To sign out from %s SMS, send SIGN OUT" % (util.get_metadata('SITE_NAME')) HELP_DELETE_ME = "To remove your %s account, send DELETE ME" % (util.get_metadata('SITE_NAME')) HELP_SIGN_IN = "Send SIGN IN <screen name> <password> if you already have a " \ "%s account" % (util.get_metadata('SITE_NAME')) HELP_SIGN_UP = "Send SIGN UP <desired screen name> to create a new account" HELP_MORE = "For more commands, type HELP" HELP_FOOTER = "\n" \ "Questions? Visit http://%s/help/im\n" \ "Contact us at support@%s" % (settings.DOMAIN, settings.HOSTED_DOMAIN) HELP_FOOTER_INFORMAL = "\n" \ "How it all works: http://%s/help/im" % (settings.DOMAIN) HELP_OTR = "Your IM client has tried to initiate an OTR (off-the-record) session. However, this bot does not support OTR." HELP_START_NOTIFICATIONS = "SMS notifications have been enabled. Send OFF to stop, HELP for commands." HELP_STOP_NOTIFICATIONS = "SMS notifications have been disabled. Send ON to start receiving again." 
class SmsService(base.Service): handlers = [patterns.SignInHandler, patterns.SignOutHandler, patterns.PromotionHandler, patterns.HelpHandler, patterns.CommentHandler, patterns.OnHandler, patterns.OffHandler, patterns.ChannelPostHandler, patterns.FollowHandler, patterns.LeaveHandler, patterns.ConfirmHandler, patterns.PostHandler, ] # TODO(termie): the following should probably be part of some sort of # service interface, it is almost an exact duplicate of # ImService def response_ok(self, rv=None): return "" def response_error(self, exc): return str(exc) def channel_join(self, sender, nick): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) channel = clean.channel(nick) try: api.channel_join(sender_ref, sender_ref.nick, channel) self.send_message((sender,), "%s joined %s" % (sender_ref.display_nick(), nick)) except: self.send_message((sender,), "Failed to join %s" % nick) def channel_part(self, sender, nick): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) channel = clean.channel(nick) try: api.channel_part(sender_ref, sender_ref.nick, channel) self.send_message((sender,), "%s left %s" % (sender_ref.display_nick(), nick)) except: self.send_message((sender,), "Failed to leave %s" % nick) def confirm(self, sender): """ confirm something if something needs to be confirmed otherwise, just post the message """ sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) if sender_ref.extra.get('sms_double_opt_in', None): api.mobile_confirm_doubleoptin(api.ROOT, sender_ref.nick) self.start_notifications(sender) def actor_add_contact(self, sender, nick): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) clean_nick = clean.nick(nick) try: api.actor_add_contact(sender_ref, sender_ref.nick, clean_nick) 
self.send_message((sender,), "%s followed %s" % (sender_ref.display_nick(), nick)) except: self.send_message((sender,), "Failed to follow %s" % nick) def actor_remove_contact(self, sender, nick): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) clean_nick = clean.nick(nick) try: api.actor_remove_contact(sender_ref, sender_ref.nick, clean_nick) self.send_message((sender,), "%s stopped following %s" % (sender_ref.dispaly_nick(), nick)) except: self.send_message((sender,), "Failed to stop following %s" % nick) def send_message(self, to_list, message): self.connection.send_message(to_list, message) def unknown(self, sender, message): self.send_message([sender], HELP_HUH % message) def sign_in(self, sender, nick, password): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if sender_ref: raise exception.ValidationError( "You are already signed in, please SIGN OUT first") user_ref = user.authenticate_user_login(nick, password) if not user_ref: raise exception.ValidationError("Username or password is incorrect") mobile_ref = api.mobile_associate(api.ROOT, user_ref.nick, sender) # if they need to double opt in send them the confirmation message welcome = ' '.join([HELP_WELCOME_NICK % user_ref.display_nick(), HELP_POST, HELP_START, HELP_CHARGES ]) self.send_message([sender], welcome) def sign_out(self, sender): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) mobile_ref = api.mobile_disassociate(api.ROOT, sender_ref.nick, sender) self.send_message([sender], HELP_SIGNED_OUT) def help(self, sender): welcome = ' '.join([HELP_HELP_1, HELP_HELP_2, ]) self.send_message([sender], welcome) def start_notifications(self, sender): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) if sender_ref.extra.get('sms_double_opt_in', None): message = ' '.join([HELP_DOUBLE_OPT_IN, 
HELP_CHARGES]) self.send_message([sender], message) return actor_ref = api.settings_change_notify(api.ROOT, sender_ref.nick, sms=True) message = ' '.join([HELP_START_NOTIFICATIONS, HELP_CHARGES]) self.send_message([sender], message) def stop_notifications(self, sender): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) actor_ref = api.settings_change_notify(api.ROOT, sender_ref.nick, sms=False) self.send_message([sender], HELP_STOP_NOTIFICATIONS) def post(self, sender, message): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) entry_ref = api.post(sender_ref, nick=sender_ref.nick, message=message) def channel_post(self, sender, channel_nick, message): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) comment_ref = api.channel_post( sender_ref, message=message, nick=sender_ref.nick, channel=channel_nick ) def add_comment(self, sender, nick, message): sender_ref = api.actor_lookup_mobile(api.ROOT, sender) if not sender_ref: raise exception.ValidationError(HELP_SIGN_IN) logging.debug("comment: %s %s %s", nick, sender_ref.nick, message) nick = clean.nick(nick) stream_entry = api.reply_get_cache(sender=nick, target=sender_ref.nick, service='sms') if not stream_entry: # Well, or memcache timed it out... Or we crashed... Or... Or... raise exception.ValidationError( 'The message to which you tried to respond doesn\'t exist') api.entry_add_comment(sender_ref, entry=stream_entry.keyname(), content=message, nick=sender_ref.nick, stream=stream_entry.stream)
CollabQ/CollabQ
common/sms.py
Python
apache-2.0
10,292
[ "VisIt" ]
c2db0ccb3a4139501cf1c20bd07e9d403c36230bc3d4ffb6f09363784f620888
# Copyright (C) 2017 Martin Nilsson # This file is part of the Memtran compiler. # # The Memtran compiler is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # The Memtran compiler is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with the Memtran compiler. If not, see http://www.gnu.org/licenses/ . import util from ast import * from fun_dict_stuff import * from type_identity import * ################### INTERNAL TYPES FOR TYPE INFERENCE AID ################### class NAlternativePossibilitiesType(NType): # TODO: maybe remove this type if not needed! # Arraylist<NType> alternativesList; def __init__(self, lineNr, rowNr, alternativesList): self.lineNr = lineNr self.rowNr = rowNr self.alternativesList = alternativesList def print_it(self): for alt in self.alternativesList: print("ALT: ", end='') alt.print_it() def get_line_nr(self): return self.lineNr def get_row_nr(self): return self.rowNr def create_copy(self): altListCopy = [] for alt in self.alternativesList: altListCopy.append(alt.create_copy()) return NAlternativePossibilitiesType(self.lineNr, self.rowNr, altListCopy) def accept_visitor(self, visitor): for alt in self.alternativesList: success = alt.accept_visitor(visitor) if success == False: return False return visitor.visit(self) class NUnknownType(NType): def __init__(self, lineNr, rowNr): self.lineNr = lineNr self.rowNr = rowNr def print_it(self): print("UNKNOWN_TYPE", end='') def get_line_nr(self): return self.lineNr def get_row_nr(self): return self.rowNr def create_copy(self): return NUnknownType(self.lineNr, self.rowNr) def 
accept_visitor(self, visitor): return False # You should not visit these kinds of types actually class NUnspecializedTemplateType(NType): # long lineNr; # long rowNr; # ArrayList<NIdentifier> params; # ArrayList<NTypeArg> typeArgs; # ArrayList<NType> returnTypes; pass # TODO (if needed) class NInsertVariantBoxingHere(NType): # TODO: we probably won't need this type # NType constituentType def __init__(self, lineNr, rowNr, superType, constituentType): self.lineNr = lineNr self.rowNr = rowNr self.superType = superType self.constituentType = constituentType def print_it(self): print("VARIANT_BOX_ME/", end='') self.constituentType.print_it() print("\\ as ", end='') self.superType.print_it() def get_line_nr(self): return self.lineNr def get_row_nr(self): return self.rowNr def create_copy(self): return NInsertVariantBoxingHere(self.lineNr, self.rowNr, self.superType.create_copy(), self.constituentType.create_copy()) def accept_visitor(self, visitor): return False # You should not visit these kinds of types actually ############################## HELPER STUFF FOR TYPE CHECKING / INFERENCE ########################### integerChoices = [NISizeType(0, 0), NU8Type(0, 0)] # Add more choices later... floatingChoices = [NF64Type(0, 0), NF32Type(0, 0)] ##################################################################################################### # returns a tuple (inferredType, transformedExpression) . 
inferredType == False means the inferring failed # Also annotates the newExpression with "inferredType" field # Also annotates identifier expressions with "mangledName" field # Also annotates (certain, currently) identifier expressions with "identifierType" field (which, in case of indexings may not be the same as 'inferredType') def type_infer_and_annotate_expression( expr, inferredTypeFromBelowRaw, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ): inferredTypeFromBelow = concretize(inferredTypeFromBelowRaw, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) # First, check whether we are matching against a variant-box type! if isinstance(inferredTypeFromBelow, NVariantBoxType): if isinstance(expr, NVariantBoxExpression): typeResult, exprResult = type_infer_and_annotate_expression( expr.expression, NUnknownType(expr.lineNr, expr.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if typeResult == False: return (False, None) for typ in inferredTypeFromBelow.types: matchResult = match_as_below( concretize(typ, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), concretize(exprResult, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: exprResult.expression.inferredType = concretize(typeResult.create_copy(), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) exprResult.inferredType = inferredTypeFromBelow.createCopy() return (inferredTypeFromBelow.create_copy(), exprResult) # and if we are still here: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch.") return (False, 
None) else: typeResult, exprResult = type_infer_and_annotate_expression( expr, NUnknownType(expr.lineNr, expr.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if typeResult == False: return (False, None) matchResult = extended_match_as_below( inferredTypeFromBelow, typeResult, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult == "several": if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Expression can be implicitly variant-boxed in several ways. Please type specify.") return (False, None) elif matchResult == "vbox": if not beSilentFlag: # annotation exprResult.inferredType = concretize(typeResult.create_copy(), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) vbox = NVariantBoxExpression(expr.lineNr, expr.rowNr, exprResult) if not beSilentFlag: # annotate vbox.inferredType = inferredTypeFromBelow.create_copy() return (inferredTypeFromBelow.create_copy(), vbox) elif matchResult: if not beSilentFlag: exprResult.inferredType = concretize(typeResult.create_copy(), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) return (typeResult.create_copy(), exprResult) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch.") return (False, None) # If we are still here, continue below... if isinstance(expr, NIdentifierExpression): foundDefinitions = [] # actually, we have to gather all the definitions from all the three levels of dicts. For funlists. 
if expr.moduleNameOrNull is None: for funDict in reversed(funDictStack): if expr.name.name in funDict: foundDefinitions.append(funDict[expr.name.name]) for moduleName, directlyImportedFunsDict in directlyImportedFunsDictDict.items(): if expr.name.name in directlyImportedFunsDict: entry = directlyImportedFunsDict[expr.name.name] if isinstance(entry, FunListEntry): prunedList = [] for someFunOrTemplate in entry.funEntries: if not someFunOrTemplate.signature.isInternal: prunedList.append(someFunOrTemplate) foundDefinitions.append(FunListEntry(prunedList)) elif isinstance(entry, VarEntry): if not entry.isInternal: foundDefinitions.append(entry) else: util.log_error(expr.lineNr, expr.rowNr, "Undetermined definition of identifier expression during type check. SHOULD NOT HAPPEN") return (False, None) if expr.name.name in builtInFunsDict: foundDefinitions.append(builtInFunsDict[expr.name.name]) else: if expr.moduleNameOrNull in otherImportedModulesFunDictDict: moduleDict = otherImportedModulesFunDictDict[expr.moduleNameOrNull] if expr.name.name in moduleDict: entry = moduleDict[expr.name.name] if isinstance(entry, FunListEntry): prunedList = [] for someFunOrTemplate in entry.funEntries: if not someFunOrTemplate.signature.isInternal: prunedList.append(someFunOrTemplate) foundDefinitions.append(FunListEntry(prunedList)) elif isinstance(entry, VarEntry): if not entry.isInternal: foundDefinitions.append(entry) else: util.log_error(expr.lineNr, expr.rowNr, "Undetermined definition of module specified identifier expression during type check. 
SHOULD NOT HAPPEN") return (False, None) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Reference to module that has not been imported.") return (False, None) if len(foundDefinitions) == 0: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Usage of name that has not been declared.") return (False, None) if isinstance(foundDefinitions[0], FunListEntry): # We can assume that we are not coming from/as a function call name, # since the function call case does not recurse on its function name expression. # Assume that all definitions are fun list entries... I think it will work? myAlternativeList = [] # should contain tuples (typeAlternative, mangledName), or template entries... for foundFunList in foundDefinitions: for entry in foundFunList.funEntries: if isinstance(entry, FunEntry): typeArgs = [] for param in entry.signature.params: if isinstance(param, NNormalParam): typeArgs.append(NNormalTypeArg(param.lineNr, param.rowNr, param.isMut, param.isConstruand, param.argType.create_copy())) else: # NRefParam hopefully... typeArgs.append(NRefTypeArg(param.lineNr, parm.rowNr, param.argType.create_copy())) returnTypes = [] for returnType in entry.signature.returnTypes: returnTypes.append(returnType.create_copy()) myAlternativeList.append( (NFunctionType(expr.lineNr, expr.rowNr, typeArgs, returnTypes), entry.mangledName) ) elif isinstance(entry, SpecializedTemplateEntry): typeArgs = [] for param in entry.signature.params: if isinstance(param, NNormalParam): typeArgs.append(NNormalTypeArg(param.lineNr, param.rowNr, param.isMut, param.isConstruand, param.argType.create_copy())) else: # NRefParam hopefully... 
typeArgs.append(NRefTypeArg(param.lineNr, parm.rowNr, param.argType.create_copy())) returnTypes = [] for returnType in entry.signature.returnTypes: returnTypes.append(returnType.create_copy()) myAlternativeList.append( (NFunctionType(expr.lineNr, expr.rowNr, typeArgs, returnTypes), entry.mangledName) # hope we don't need anything specifying it as a tspec rather than an ordinary fun entry... ) elif isinstance(entry, TemplateEntry): # How do we add this to the alternatives list without specialising it yet? myAlternativeList.append( entry # we make use of Python's dynamic nature here and simply add the SpecializedTemplateEntry... let's see how this works out... ) else: util.log_error(expr.lineNr, expr.rowNr, "Type checking found FunList entry that is wrong kind, prolly template spec. SHOULD NOT HAPPEN") return (False, None) if len(expr.indexings) > 0: util.log_error(expr.lineNr, expr.rowNr, "Trying to index a function type value somehow (no indexings are possible on such values).") return (False, None) matchResults = [] for tupleOrTemplateEntry in myAlternativesList: if isinstance(tupleOrTemplateEntry, TemplateEntry): matchResults.append(None) # this stands for unresolved... 
else: alt, mangleName = tupleOrTemplateEntry matchResult = unknownextended_match_as_below( inferredTypeFromBelow, alt, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) matchResults.append(matchResult) nonFalseCount = 0 for matchResult in matchResults: if matchResult != False: nonFalseCount += 1 if nonFalseCount == 0: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Could not infer any suitable type for this identifier expression.") return (False, None) elif nonFalseCount == 1: # We prefer already specialized tspecs to trying specializing anew here: theFoundIndex = -1 for i in range(0, len(matchResults)): if matchResults[i] != False: theFoundIndex = i break if matchResults[theFoundIndex] == None: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Please type specify the template specialization which here is referred to as a function type value.") return (False, None) else: # Then it should be True hopefully... alt, mangleName = myAlternativesList[i] if not beSilentFlag: # annotating... expr.inferredType = concretize(alt, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) expr.mangledName = mangleName return (alt.create_copy(), expr) else: # > 1 if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Please type specify the function which here is referred to as a function type value.") return (False, None) # Should ideally print the alternatives too, but not for now... elif isinstance(foundDefinitions[0], VarEntry): # Assume there is only one def in the definitions list, which should be the case hopefully... 
if not beSilentFlag: expr.identifierType = concretize(foundDefinitions[0].theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = expr.identifierType.create_copy() for indexing in expr.indexings: if isinstance(indexing, NArrayIndexingIndex): indexTypeResult, indexExprResult = type_infer_and_annotate_expression( indexing.indexExpression, NISizeType(indexing.lineNr, indexing.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if indexTypeResult == False: return (False, None) if isinstance(accumulatedType, NDynamicArrayType): if not beSilentFlag: indexing.inferredType = concretize(accumulatedType.valueType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to array index a non-array value.") return (False, None) elif isinstance(indexing, NStructIndexingIndex): if isinstance(accumulatedType, NStructType): foundMemberType = None for member in accumulatedType.members: if member.name.name == indexing.indexName.name: foundMemberType = member.theType.create_copy() break if foundMemberType is None: util.log_error(indexing.lineNr, indexing.rowNr, "Struct indexing a non-existing struct field.") return (False, None) if not beSilentFlag: indexing.inferredType = concretize(foundMemberType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to struct index something that isn't of struct type.") return (False, None) elif isinstance(indexing, NVariantBoxCastIndex): if isinstance(accumulatedType, NVariantBoxType): foundTypeCase = None for typeCase in accumulatedType.types: matchResult = match_as_below( # 
we better not do extended match here!!! concretize(typeCase, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not foundTypeCase is None: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Here, strangely, down-conversion matches several type cases of the variant-box value...") return (False, None) else: foundTypeCase = concretize(typeCase, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) if not beSilentFlag: indexing.inferredType = foundTypeCase accumulatedType = foundTypeCase.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to variant-box down-convert non variant-box value.") return (False, None) elif isinstance(indexing, NTypeClarificationIndex): matchResult = match_as_below( # should this be extended -- no! 
accumulatedType, concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: indexing.inferredType = concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Type mismatch with type clarification...") return (False, None) else: util.log_error(indexing.lineNr, indexing.rowNr, "Type checking found unknown kind of indexing.") return (False, None) matchResult = unknownextended_match_as_below( inferredTypeFromBelow, accumulatedType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, ) if matchResult: if not beSilentFlag: expr.inferredType = concretize(foundDefinitions[0].theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) expr.mangledName = foundDefinitions[0].mangledName return (expr.inferredType.create_copy(), expr) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch. #808088080") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline print("Found: ", end='') foundDefinitions[0].theType.print_it() print("") # newline return (False, None) elif isinstance(foundDefinitions[0], BlockEntry): if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Identifier matched with block entry during type check. 
SHOULD NOT HAPPEN.") return (False, None) elif isinstance(foundDefinitions[0], ParamEntry): if not beSilentFlag: expr.identifierType = concretize(foundDefinitions[0].definitionParam.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = expr.identifierType.create_copy() for indexing in expr.indexings: if isinstance(indexing, NArrayIndexingIndex): indexTypeResult, indexExprResult = type_infer_and_annotate_expression( indexing.indexExpression, NISizeType(indexing.lineNr, indexing.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if indexTypeResult == False: return (False, None) if isinstance(accumulatedType, NDynamicArrayType): if not beSilentFlag: indexing.inferredType = concretize(accumulatedType.valueType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to array index a non-array value.") return (False, None) elif isinstance(indexing, NStructIndexingIndex): if isinstance(accumulatedType, NStructType): foundMemberType = None for member in accumulatedType.members: if member.name.name == indexing.indexName.name: foundMemberType = member.theType.create_copy() break if foundMemberType is None: util.log_error(indexing.lineNr, indexing.rowNr, "Struct indexing a non-existing struct field.") return (False, None) if not beSilentFlag: indexing.inferredType = concretize(foundMemberType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to struct index something that isn't of struct type.") return (False, None) elif isinstance(indexing, NVariantBoxCastIndex): if isinstance(accumulatedType, 
NVariantBoxType): foundTypeCase = None for typeCase in accumulatedType.types: matchResult = match_as_below( # we better not do extended match here!!! concretize(typeCase, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not foundTypeCase is None: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Here, strangely, down-conversion matches several type cases of the variant-box value...") return (False, None) else: foundTypeCase = concretize(typeCase, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) if not beSilentFlag: indexing.inferredType = foundTypeCase accumulatedType = foundTypeCase.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to variant-box down-convert non variant-box value.") return (False, None) elif isinstance(indexing, NTypeClarificationIndex): matchResult = match_as_below( # should this be extended -- no! 
accumulatedType, concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: indexing.inferredType = concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Type mismatch with type clarification...") return (False, None) else: util.log_error(indexing.lineNr, indexing.rowNr, "Type checking found unknown kind of indexing.") return (False, None) matchResult = unknownextended_match_as_below( inferredTypeFromBelow, accumulatedType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: expr.inferredType = accumulatedType.create_copy() expr.mangledName = foundDefinitions[0].mangledName return (accumulatedType.create_copy(), expr) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch.") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline print("Found: ", end='') accumulatedType.print_it() print("") # newline return (False, None) elif isinstance(foundDefinitions[0], NameIntoBlockEntry): # The name into block entry should have a real type at this point, we presume or hope... 
if not beSilentFlag: expr.identifierType = concretize(foundDefinitions[0].theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = expr.identifierType.create_copy() for indexing in expr.indexings: if isinstance(indexing, NArrayIndexingIndex): indexTypeResult, indexExprResult = type_infer_and_annotate_expression( indexing.indexExpression, NISizeType(indexing.lineNr, indexing.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if indexTypeResult == False: return (False, None) if isinstance(accumulatedType, NDynamicArrayType): if not beSilentFlag: indexing.inferredType = concretize(accumulatedType.valueType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to array index a non-array value.") return (False, None) elif isinstance(indexing, NStructIndexingIndex): if isinstance(accumulatedType, NStructType): foundMemberType = None for member in accumulatedType.members: if member.name.name == indexing.indexName.name: foundMemberType = member.theType.create_copy() break if foundMemberType is None: util.log_error(indexing.lineNr, indexing.rowNr, "Struct indexing a non-existing struct field.") return (False, None) if not beSilentFlag: indexing.inferredType = concretize(foundMemberType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to struct index something that isn't of struct type.") return (False, None) elif isinstance(indexing, NVariantBoxCastIndex): if isinstance(accumulatedType, NVariantBoxType): foundTypeCase = None for typeCase in accumulatedType.types: matchResult = match_as_below( # 
we better not do extended match here!!! concretize(typeCase, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not foundTypeCase is None: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Here, strangely, down-conversion matches several type cases of the variant-box value...") return (False, None) else: foundTypeCase = concretize(typeCase, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) if not beSilentFlag: indexing.inferredType = foundTypeCase accumulatedType = foundTypeCase.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Trying to variant-box down-convert non variant-box value.") return (False, None) elif isinstance(indexing, NTypeClarificationIndex): matchResult = match_as_below( # should this be extended -- no! 
accumulatedType, concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: indexing.inferredType = concretize(indexing.theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) accumulatedType = indexing.inferredType.create_copy() else: if not beSilentFlag: util.log_error(indexing.lineNr, indexing.rowNr, "Type mismatch with type clarification...") return (False, None) else: util.log_error(indexing.lineNr, indexing.rowNr, "Type checking found unknown kind of indexing.") return (False, None) matchResult = unknownextended_match_as_below( inferredTypeFromBelow, accumulatedType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: expr.inferredType = accumulatedType.create_copy() return (accumulatedType.create_copy(), expr) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch.") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline print("Found: ", end='') foundType.print_it() print("") # newline return (False, None) else: util.log_error(expr.lineNr, expr.rowNr, "Unknown entry found for identifier during type check. SHOULD NOT HAPPEN.") return (False, None) elif isinstance(expr, NNilExpression): # TODO cont. 
here foundType = NNilType(expr.lineNr, expr.rowNr) matchResult = unknownextended_match_as_below( inferredTypeFromBelow, foundType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: expr.inferredType = foundType return (foundType.create_copy(), expr) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch.") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline print("Found: ", end='') foundType.print_it() print("") # newline return (False, None) elif isinstance(expr, NTrueExpression) or isinstance(expr, NFalseExpression): foundType = NBoolType(expr.lineNr, expr.rowNr) matchResult = unknownextended_match_as_below( inferredTypeFromBelow, foundType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: expr.inferredType = foundType return (foundType.create_copy(), expr) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch.") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline print("Found: ", end='') foundType.print_it() print("") # newline return (False, None) elif isinstance(expr, NIntegerExpression): for foundType in integerChoices: matchResult = unknownextended_match_as_below( inferredTypeFromBelow, foundType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: expr.inferredType = foundType.create_copy() return (foundType.create_copy(), expr) # (if we are still here, continue below) if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Found no implemented functioning type alternative for this integer literal.") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline return (False, None) elif isinstance(expr, NFloatingPointNumberExpression): for foundType in floatingChoices: matchResult = unknownextended_match_as_below( inferredTypeFromBelow, 
foundType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: expr.inferredType = foundType.create_copy() return (foundType.create_copy(), expr) # (if we are still here, continue below) if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Found no implemented functioning type alternative for this floating point number literal.") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline return (False, None) elif isinstance(expr, NStringExpression): foundType = NDynamicArrayType(expr.lineNr, expr.rowNr, NU8Type(expr.lineNr, expr.rowNr)) matchResult = unknownextended_match_as_below( inferredTypeFromBelow, foundType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult: if not beSilentFlag: expr.inferredType = foundType return (foundType.create_copy(), expr) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch.") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline print("Found: ", end='') foundType.print_it() print("") # newline return (False, None) elif isinstance(expr, NArrayExpressionIndividualValues): if isinstance(inferredTypeFromBelow, NUnknownType): if len(expr.values) == 0: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Unable to infer exact type of empty array expression. 
Please type specify.") return (False, None) else: firstType = None newValues = [] for value in expr.values: typeResult, exprResult = type_infer_and_annotate_expression( value, NUnknownType(value.lineNr, value.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if typeResult == False: return (False, None) if firstType is None: firstType = concretize(typeResult, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) else: # how do we match correctly here -- trying match_as_below simply, hope it is enough... matchResult = match_as_below( firstType, concretize(typeResult, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if matchResult != True: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Array of disparate types. (Or alternatively, the compiler may be unsufficiently smart here.)") return (False, None) newValues.append(exprResult) if not beSilentFlag: expr.values = newValues expr.inferredType = NDynamicArrayType(expr.lineNr, expr.rowNr, firstType.create_copy()) return (expr.inferredType.create_copy(), expr) elif isinstance(inferredTypeFromBelow, NDynamicArrayType): expectedValueType = concretize(inferredTypeFromBelow.valueType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) newValues = [] for value in expr.values: typeResult, exprResult = type_infer_and_annotate_expression( value, expectedValueType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if typeResult == False: return (False, None) matchResult = match_as_below( expectedValueType, typeResult, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict ) if 
matchResult: newValues.append(value) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch.") print("Expected: ", end='') expectedValueType.print_it() print("") # newline print("Found: ", end='') typeResult.print_it() print("") # newline return (False, None) if not beSilentFlag: expr.values = newValues expr.inferredType = inferredTypeFromBelow.create_copy() return (inferredTypeFromBelow.create_copy(), expr) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch. Did not expect an array expression here.") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline return (False, None) elif isinstance(expr, NArrayExpressionNoInitialization): typeResult, exprResult = type_infer_and_annotate_expression( expr.length, NISizeType(expr.lineNr, expr.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if typeResult == False: return (False, None) if isinstance(inferredTypeFromBelow, NUnknownType): if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Unable to infer exact type of array expression. 
Please type specify.") return (False, None) elif isinstance(inferredTypeFromBelow, NDynamicArrayType): if not beSilentFlag: expr.length = exprResult expr.inferredType = inferredTypeFromBelow.create_copy() return (inferredTypeFromBelow.create_copy(), expr) else: # If it is a variant-box type directly or indirectly, we don't have enough info on this expr construct: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch, or alternatively not enough type information (perhaps you can specify the type).") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline return (False, None) elif isinstance(expr, NArrayExpressionRepeatedValue): if isinstance(inferredTypeFromBelow, NUnknownType): typeResult, exprResult = type_infer_and_annotate_expression( expr.repeatedValue, NUnknownType(expr.repeatedValue.lineNr, expr.repeatedValue.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if typeResult == False: return (False, None) lenTypeResult, lenExprResult = type_infer_and_annotate_expression( expr.length, NISizeType(expr.length.lineNr, expr.length.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if lenTypeResult == False: return (False, None) if not beSilentFlag: expr.repeatedValue = exprResult expr.length = lenExprResult expr.inferredType = NDynamicArrayType(expr.lineNr, expr.rowNr, typeResult.create_copy()) return (expr.inferredType.create_copy(), expr) elif isinstance(inferredTypeFromBelow, NDynamicArrayType): expectedIndexType = concretize(inferredTypeFromBelow.valueType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) typeResult, exprResult = type_infer_and_annotate_expression( expr.repeatedValue, expectedIndexType, typeDict, 
directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if typeResult == False: return (False, None) lenTypeResult, lenExprResult = type_infer_and_annotate_expression( expr.length, NISizeType(expr.length.lineNr, expr.length.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, beSilentFlag ) if lenTypeResult == False: return (False, None) if not beSilentFlag: expr.repeatedValue = exprResult expr.length = lenExprResult expr.inferredType = NDynamicArrayType(expr.lineNr, expr.rowNr, concretize(typeResult, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict)) return (expr.inferredType.create_copy(), expr) else: if not beSilentFlag: util.log_error(expr.lineNr, expr.rowNr, "Type mismatch. Did not expect an array expression here") print("Expected: ", end='') inferredTypeFromBelow.print_it() print("") # newline return (False, None) elif isinstance(expr, NStructExpression): if isinstance(inferredTypeFromBelow, NUnknownType): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking struct expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(inferredTypeFromBelow, NStructType): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking struct expressions NOT IMPLEMENTED YET.") return (False, None) else: # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking struct expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NVariantBoxExpression): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking variant-box expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NTypeClarifiedExpression): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking type clarified expressions NOT IMPLEMENTED YET.") return (False, None) elif 
isinstance(expr, NArrayIndexing): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking general array indexing expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NStructIndexing): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking general struct indexing expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NVariantBoxCastExpression): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking variant-box cast expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NAndSymbolExpression): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking '&&' expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NOrSymbolExpression): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking '||' expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NEndExpression): util.log_error(expr.lineNr, expr.rowNr, "SHOULD NOT HAPPEN: type inference encountered unexpanded end expression...") return (False, None) elif isinstance(expr, NFunctionCall): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking function call expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NIFExpression): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking 'IF' expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NSWITCHExpression): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking 'SWITCH' expressions NOT IMPLEMENTED YET.") return (False, None) elif isinstance(expr, NCONTENTTYPEExpression): # TODO util.log_error(expr.lineNr, expr.rowNr, "Type checking 'STORETYPE' expressions NOT IMPLEMENTED YET.") return (False, None) else: util.log_error(expr.lineNr, expr.rowNr, "Type inference strangely encountered unknown expression.") return (False, None) # Returns True, False, "several", or "vbox"... 
# This version of match_as_below can also match against NUnknownType.
# On top of a plain match it also tries variant-boxing: if the inferred type
# matches exactly one of t's variant-box type cases, a copy of that type case
# is returned; "several" is returned if more than one case would match
# (practically impossible though, since all extensions are distinct).
# A bit hackish, yes...
def extended_match_as_below(t, inferredMatchType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict):
    # An unknown expected type matches anything.
    if isinstance(t, NUnknownType):
        return True
    directMatch = match_as_below(t, inferredMatchType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict)
    if directMatch:
        return directMatch
    if not isinstance(t, NVariantBoxType):
        return False
    # No direct match: look for exactly one variant-box case that matches.
    candidate = False
    for caseType in t.types:
        caseMatch = match_as_below(caseType, inferredMatchType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict)
        if caseMatch:
            if candidate != False:
                # several possible matches -- does this really happen -- hardly since all extensions are distinct...
                return "several"
            candidate = caseType.create_copy()
    return candidate
# This version is simply extended with match against NUnknownType.
def unknownextended_match_as_below(t, inferredMatchType, typeDict, directlyImportedTypesDictDict, otherImportedMoudulesTypeDictDict): if isinstance(t, NUnknownType): return True else: return match_as_below(t, inferredMatchType, typeDict, directlyImportedTypesDictDict, otherImportedMoudulesTypeDictDict) def run_pass( programAST, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDict, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict ): funDictStack = [funDict] blockNumberList = [] for statement in programAST.statements: success = type_check_statement( statement, blockNumberList, 0, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, None ) if success == False: return False return True def type_check_statement( statement, blockNumberList, depth, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ): if isinstance(statement, NRefToFunctionDeclarationWithDefinition): actual = statement.funs[statement.funsIndex] if len(blockNumberList) <= depth: for i in range(len(blockNumberList), depth + 1): blockNumberList.append(0) else: blockNumberList[depth] += 1 funcEntryList = funDictStack[len(funDictStack) - 1][actual.name.name].funEntries funcEntry = None # dummy for fentry in funcEntryList: if fentry.mangledName == actual.mangledName: funcEntry = fentry # we better find it... 
below code assumes != None for stmt in actual.body.statements: success = type_check_statement( stmt, blockNumberList, depth + 1, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack + [funcEntry.localDict], builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, funcEntry.signature.returnTypes ) if success == False: return False return True elif isinstance(statement, NBlock): if len(blockNumberList) <= depth: for i in range(len(blockNumberList), depth + 1): blockNumberList.append(0) else: blockNumberList[depth] += 1 for stmt in statement.statements: success = type_check_statement( stmt, blockNumberList, depth + 1, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack + [funDictStack[len(funDictStack) - 1][str(blockNumberList[depth])].localDict], builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ) if success == False: return False return True elif isinstance(statement, NIfStatement): typeResult, exprResult = type_infer_and_annotate_expression( statement.condition, NUnknownType(statement.condition.lineNr, statement.condition.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: statement.condition = exprResult success = type_check_statement( statement.ifBlock, blockNumberList, depth, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ) if success == False: return False for elseIfClause in statement.elseIfClauses: typeResult, exprResult = type_infer_and_annotate_expression( elseIfClause.condition, NUnknownType(statement.condition.lineNr, statement.condition.rowNr), typeDict, directlyImportedTypesDictDict, 
otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: elseIfClause.condition = exprResult success = type_check_statement( elseIfClause.block, blockNumberList, depth, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ) if success == False: return False if not statement.elseBlockOrNull is None: success = type_check_statement( statement.elseBlockOrNull, blockNumberList, depth, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ) if success == False: return False return True elif isinstance(statement, NRefToTemplateDeclarationWithDefinition): return True # don't go further here I guess... elif isinstance(statement, NLoopStatement): success = type_check_statement( statement.block, blockNumberList, depth, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ) if success == False: return False return True elif isinstance(statement, NForStatement): if not statement.rangeOrNull is None: statement.rangeOrNull.counterType = concretize(statement.rangeOrNull.counterType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) typeResult, exprResult = type_infer_and_annotate_expression( statement.rangeOrNull.rangeFrom, statement.rangeOrNull.counterType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: statement.rangeOrNull.rangeFrom = exprResult typeResult, exprResult 
= type_infer_and_annotate_expression( statement.rangeOrNull.rangeTo, statement.rangeOrNull.counterType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: statement.rangeOrNull.rangeTo = exprResult for iteration in statement.iterations: if isinstance(iteration, NIterationIn): itType = NUnknownType(iteration.lineNr, iteration.rowNr) arrayType = NUnknownType(iteration.lineNr, iteration.rowNr) if not iteration.itTypeOrNull is None: itType = concretize(iteration.itTypeOrNull, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) # (not sure concretization is needed here but cannot hurt...) arrayType = NDynamicArrayType(iteration.lineNr, iteration.rowNr, itType.create_copy()) typeResult, exprResult = type_infer_and_annotate_expression( iteration.arrayExpression, arrayType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: iteration.arrayExpression = exprResult # now set the name into block's type... dictOfTheBodyBlock = funDictStack[len(funDictStack) - 1][statement.block.blockEntryNumStr].localDict concr = concretize(iteration.arrayExpression.inferredType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) if not isinstance(concr, NDynamicArrayType): util.log_error(concr.lineNr, concr.rowNr, "Weird type error. Should probably not happen") return False dictOfTheBodyBlock[iteration.itName.name].theType = concr.valueType.create_copy() else: # NIterationOver hopefully... 
itType = NUnknownType(iteration.lineNr, iteration.rowNr) arrayType = NUnknownType(iteration.lineNr, iteration.rowNr) if not iteration.itTypeOrNull is None: itType = concretize(iteration.itTypeOrNull, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) arrayType = NDynamicArrayType(iteration.lineNr, iteration.rowNr, itType.create_copy()) typeResult, exprResult = type_infer_and_annotate_expression( iteration.arrayLValue.lValueExpression, arrayType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: iteration.arrayLValue.lValueExpression = exprResult # now set the name into block's type... dictOfTheBodyBlock = funDictStack[len(funDictStack) - 1][statement.block.blockEntryNumStr].localDict concr = concretize(iteration.arrayLValue.lValueExpression.inferredType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) if not isinstance(concr, NDynamicArrayType): util.log_error(concr.lineNr, concr.rowNr, "Weird type error. 
Should probably not happen") return False dictOfTheBodyBlock[iteration.itName.name].theType = concr.valueType.create_copy() # the rest can in Python be done for both iteration classes at the same time: if not iteration.indexfactorOrNull is None: typeResult, exprResult = type_infer_and_annotate_expression( iteration.indexfactorOrNull, NISizeType(iteration.lineNr, iteration.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: iteration.indexfactorOrNull = exprResult if not iteration.indexoffsetOrNull is None: typeResult, exprResult = type_infer_and_annotate_expression( iteration.indexoffsetOrNull, NISizeType(iteration.lineNr, iteration.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: iteration.indexoffsetOrNull = exprResult success = type_check_statement( statement.block, blockNumberList, depth, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ) if success == False: return False return True elif isinstance(statement, NSwitchStatement): typeResult, exprResult = type_infer_and_annotate_expression( statement.switchValue, NUnknownType(statement.switchValue.lineNr, statement.switchValue.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: statement.switchValue = exprResult for case in statement.cases: for caseValue in case.caseValues: typeResult, exprResult = type_infer_and_annotate_expression( caseValue, 
statement.switchValue.inferredType, # this will allow vboxing here though... typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: pass # we actually don't have to set it here actually... TODO UNLESS VBOXING.....!!!!!! which should not be here TODO success = type_check_statement( case.block, blockNumberList, depth, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ) if success == False: return False if not statement.defaultCaseOrNull is None: success = type_check_statement( statement.defaultCaseOrNull, blockNumberList, depth, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, currentReturnTypes ) if success == False: return False return True elif isinstance(statement, NContenttypeStatement): # TODO util.log_error(statement.lineNr, statement.rowNr, "Typechecking/expanding 'storetype' statements is NOT YET IMPLEMENTED.") return False elif isinstance(statement, NNormalAssignment): if len(statement.leftHandSide) == 1: lhsType = None if isinstance(statement.leftHandSide[0], NVariableDeclaration): statement.leftHandSide[0].theType = concretize(statement.leftHandSide[0].theType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) lhsType = statement.leftHandSide[0].theType.create_copy() else: # LValueContainer hopefully... 
typeResult, exprResult = type_infer_and_annotate_expression( statement.leftHandSide[0].lValueExpression, NUnknownType(statement.lineNr, statement.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: statement.leftHandSide[0].lValueExpression = exprResult lhsType = concretize(typeResult.create_copy(), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) # can never hurt with possibly superfluous concretize... typeResult, exprResult = type_infer_and_annotate_expression( statement.value, lhsType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: statement.value = exprResult return True else: # TODO util.log_error(statement.lineNr, statement.rowNr, "Typechecking multiple assignment NOT YET IMPLEMENTED.") return False return True elif (isinstance(statement, NModuloAssignment) or isinstance(statement, NMultiplicationAssignment) or isinstance(statement, NDivisionAssignment) or isinstance(statement, NAdditionAssignment) or isinstance(statement, NSubtractionAssignment) ): if len(statement.leftHandSide) == 1: typeResult, exprResult = type_infer_and_annotate_expression( statement.leftHandSide[0].lValueExpression, NUnknownType(statement.lineNr, statement.rowNr), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: statement.leftHandSide[0].lValueExpression = exprResult lhsType = concretize(typeResult.create_copy(), typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict) typeResult, exprResult = type_infer_and_annotate_expression( 
statement.value, lhsType, typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: statement.value = exprResult return True else: util.log_error(statement.lineNr, statement.rowNr, "Typechecking multiple compound assignment NOT YET IMPLEMENTED.") return False elif isinstance(statement, NReturnStatement): if currentReturnTypes is None: util.log_error(statement.lineNr, statement.rowNr, "Type checking pass found usage of a return statement outside any function declaration.") return False if len(currentReturnTypes) != len(statement.returnExpressions): util.log_error(statement.lineNr, statement.rowNr, "Wrong number of return arguments to return statement.") return False for i in range(0, len(currentReturnTypes)): typeResult, exprResult = type_infer_and_annotate_expression( statement.returnExpressions[i], currentReturnTypes[i], typeDict, directlyImportedTypesDictDict, otherImportedModulesTypeDictDict, funDictStack, builtInFunsDict, directlyImportedFunsDictDict, otherImportedModulesFunDictDict, False ) if typeResult == False: return False else: pass # no problem here return True elif isinstance(statement, NFunctionCallStatement): # TODO util.log_error(statement.lineNr, statement.rowNr, "Typechecking function call statements is NOT YET IMPLEMENTED.") return False else: return True
LJMNilsson/memtran
src/type_inference_and_annotation.py
Python
gpl-3.0
78,047
[ "VisIt" ]
87d48a97d1cb4d78c8c982bf8d02d5a523cf1de6702d03f5802350fd292e661c
# pylint: disable-msg=I0011,C0301,W0611 # copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # copyright 2003-2010 Sylvain Thenault, all rights reserved. # contact mailto:thenault@gmail.com # # This file is part of logilab-astng. # # logilab-astng is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 2.1 of the License, or (at your # option) any later version. # # logilab-astng is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License # for more details. # # You should have received a copy of the GNU Lesser General Public License along # with logilab-astng. If not, see <http://www.gnu.org/licenses/>. """I found some of my scripts trigger off an AttributeError in pylint 0.8.1 (with common 0.12.0 and astng 0.13.1). Traceback (most recent call last): File "/usr/bin/pylint", line 4, in ? 
lint.Run(sys.argv[1:]) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 729, in __init__ linter.check(args) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 412, in check self.check_file(filepath, modname, checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 426, in check_file astng = self._check_file(filepath, modname, checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 450, in _check_file self.check_astng_module(astng, checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 494, in check_astng_module self.astng_events(astng, [checker for checker in checkers File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 511, in astng_events self.astng_events(child, checkers, _reversed_checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 511, in astng_events self.astng_events(child, checkers, _reversed_checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 508, in astng_events checker.visit(astng) File "/usr/lib/python2.4/site-packages/logilab/astng/utils.py", line 84, in visit method(node) File "/usr/lib/python2.4/site-packages/pylint/checkers/variables.py", line 295, in visit_import self._check_module_attrs(node, module, name_parts[1:]) File "/usr/lib/python2.4/site-packages/pylint/checkers/variables.py", line 357, in _check_module_attrs self.add_message('E0611', args=(name, module.name), AttributeError: Import instance has no attribute 'name' You can reproduce it by: (1) create package structure like the following: package/ __init__.py subpackage/ __init__.py module.py (2) in package/__init__.py write: import subpackage (3) run pylint with a script importing package.subpackage.module. """ __revision__ = '$Id: import_package_subpackage_module.py,v 1.1 2005-11-10 15:59:32 syt Exp $' import package.subpackage.module
dbbhattacharya/kitsune
vendor/packages/logilab-astng/test/regrtest_data/import_package_subpackage_module.py
Python
bsd-3-clause
3,147
[ "VisIt" ]
445c69c8f20bd2b2e1155108bacbee6d347f89f180776e0231e81d45f318c76e
#! /usr/bin/env python from __future__ import unicode_literals # coding: utf-8 # Use Gzipped files without extractionpyp import shutil import logging import datetime import os # operation system packages import ConfigParser as configparser # a package to parse INI file or confige file. import argparse # a package to parse commandline arguments. import sys from re import sub import gzip from subprocess import call # to run command line scripts from subprocess import Popen, PIPE, check_output from multiprocessing.dummy import Pool as Pool __version__ = '0.2.7.2' __author__ = "Attayeb Mohsen" __date__ = "23/1/2019" ## 30/8/2018 add primer-length parameter ## 23/1/2019 correct a merging bug ## add start_at_chimera_removal option starting_message = """ Microbiome analysis using multiple methods Version: %s Date: %s National Institutes of Biomedical Innovation, Health, and Nutrition\n""" % \ (__version__, __date__) ID = str(datetime.datetime.now()) ID = ID.replace(" ", "") ID = ID.replace(":", "") ID = ID.replace(".", "") ID = ID.replace("-", "") ID = ID[0:14] PR = dict({"id": ID}) # PARAMETERS dict def remove_short_reads(infqfile, outfqfile, length): """ :param infqfile: input fastq file name. :type infqfile: str :param outfqfile: output fastq file name, after removing short reads. :type outfqfile: str :param length: minimum reads length. 
:type length: int :rtype: None :return: None @Action: filter fastq files removing short reads """ infq = open(infqfile, "r") outfq = open(outfqfile, "w") lines = infq.readlines() for a, b, c, d in zip(lines[0::4], lines[1::4], lines[2::4], lines[3::4]): if len(b) > length: outfq.write(a) outfq.write(b) outfq.write(c) outfq.write(d) infq.close() outfq.close() def asfolder(folder): """ Add "/" at the end of the folder if not inserted :param folder: the folder name :type folder: str :return: file names with / at the end :rtype: str """ if folder[-1] != "/": return (folder + "/") else: return (folder) def execute(command, shell=True): """ Execute command using os package and return output to log file :param command: The command to be executed :type command: str :param shell: Takes either True or False :type shell: boolean :return: Run the command in the background and save the output to the logging file. """ loginfo(command) p = Popen(command.split(), stderr=PIPE, stdout=PIPE) output, error = p.communicate() if output != b"": loginfo(output.encode('utf-8')) if error != b"": logwarning(error.encode('utf-8')) def loginfo(message): """ save information to log file :param message: saved to log file :type message: str :return: """ logging.info(message.encode('utf-8')) def logwarning(message): logging.warning(message.encode('utf-8')) def get_configuration(): global PR cp = configparser.ConfigParser() cp.read(PR['ConfigFile']) PR['Ftrimmed'] = asfolder(cp.get('FOLDERS', 'trimmed')) PR['Fmerged'] = asfolder(cp.get('FOLDERS', 'merged')) PR['Fqc'] = asfolder(cp.get('FOLDERS', 'quality_step')) PR['Fchi'] = asfolder(cp.get('FOLDERS', 'chimera_removed')) PR['Fotus'] = asfolder(cp.get('FOLDERS', 'otus')) PR['Fdiv'] = asfolder(cp.get('FOLDERS', 'diversity_analyses')) PR['Fothers'] = asfolder(cp.get('FOLDERS', 'others')) PR['number_of_cores'] = int(cp.get('GENERAL', 'jobs_to_start')) PR['silva_taxonomy'] = cp.get('SILVA', 'taxonomy') PR['silva_reference_seqs'] = cp.get('SILVA', 
'reference_seqs') PR['silva_core_alignment'] = cp.get('SILVA', 'core_alignment') PR['silva_chim_ref'] = cp.get('CHIMERA', 'silva') PR['gg_taxonomy'] = cp.get('GG', 'taxonomy') PR['gg_reference_seqs'] = cp.get('GG', 'reference_seqs') PR['gg_core_alignment'] = cp.get('GG', 'core_alignment') PR['gg_chim_ref'] = cp.get('CHIMERA', 'gg') PR['unite_taxonomy'] = cp.get('UNITE', 'taxonomy') PR['unite_reference_seqs'] = cp.get('UNITE', 'reference_seqs') PR['similarity'] = cp.get('GENERAL', 'similarity') PR['blast_e_value'] = cp.get('GENERAL', 'blast_e_value') PR['bbmap_resources'] = cp.get('bbmap', 'resources') def locate_bbmap(): """ locate the folder of bbmap :return: """ folder = check_output(["locate", "bbmerge.sh"]).decode("utf-8") return (sub('bbmerge.sh\n$', '', folder)) def check_before_start(): """ :return: """ if os.path.isfile(PR['ConfigFile']): pass else: raise IOError("configuration file does not exist") if PR['rdb'] == "silva": condition = True condition = condition and os.path.isfile(PR['silva_taxonomy']) condition = condition and os.path.isfile(PR['silva_reference_seqs']) condition = condition and os.path.isfile(PR['silva_core_alignment']) if not condition: raise IOError("Can not find Silva database files, " "please check the configuration file: %s " "to set up the correct folder" % PR['ConfigFile']) if PR['rdb'] == "gg": condition = True condition = condition and os.path.isfile(PR['gg_taxonomy']) condition = condition and os.path.isfile(PR['gg_reference_seqs']) condition = condition and os.path.isfile(PR['gg_core_alignment']) if not condition: raise IOError("Can not find greengenes database files, " "please check the configuration file: %s to set up the correct folder" % PR['ConfigFile']) if os.path.isdir(PR['out_folder']): raise IOError("Output folder exists, Please use a non existent folder name") def write_parameter_file(parameter_file): """ :param parameter_file: :return: """ if PR['rdb'] == "silva": parameter_string = """ 
assign_taxonomy:id_to_taxonomy_fp\t%(taxonomy)s assign_taxonomy:reference_seqs_fp\t%(reference_seqs)s pick_otus.py:pick_otus_reference_seqs_fp\t%(reference_seqs)s pick_otus:enable_rev_strand_match True filter_alignment.py:pynast_template_alignment_fp\t%(core_alignment)s parallel:jobs_to_start\t%(jobs_to_start)d assign_taxonomy:similarity\t%(similarity)s """ % {'taxonomy': PR['silva_taxonomy'], 'reference_seqs': PR['silva_reference_seqs'], 'core_alignment': PR['silva_core_alignment'], 'jobs_to_start': PR['number_of_cores'], 'similarity': PR['similarity']} elif PR['rdb'] == "unite": # pass parameter_string = """ assign_taxonomy:id_to_taxonomy_fp\t%(taxonomy)s assign_taxonomy:reference_seqs_fp\t%(reference_seqs)s pick_otus.py:pick_otus_reference_seqs_fp\t%(reference_seqs)s parallel:jobs_to_start\t%(jobs_to_start)d assign_taxonomy:assignment_method blast # should we use e_value or blast_e_value parallel_assign_taxonomy_blast:e_value\t%(blast_e_value)s # comment """ % {'taxonomy': PR['unite_taxonomy'], 'reference_seqs': PR['unite_reference_seqs'], 'jobs_to_start': PR['number_of_cores'], 'blast_e_value': PR['blast_e_value']} else: parameter_string = ''' assign_taxonomy:id_to_taxonomy_fp\t%(taxonomy)s assign_taxonomy:reference_seqs_fp\t%(reference_seqs)s pick_otus.py:pick_otus_reference_seqs_fp\t%(reference_seqs)s pick_otus:enable_rev_strand_match True filter_alignment.py:pynast_template_alignment_fp\t%(core_alignment)s parallel:jobs_to_start\t%(jobs_to_start)d assign_taxonomy:similarity\t%(similarity)s ''' % {'taxonomy': PR['gg_taxonomy'], 'reference_seqs': PR['gg_reference_seqs'], 'core_alignment': PR['gg_core_alignment'], 'jobs_to_start': PR['number_of_cores'], 'similarity': PR['similarity']} if os.path.exists(PR['others']): pass else: os.mkdir(PR['others']) f = open(parameter_file, "w") f.write(parameter_string) f.close() #def copyfilesanddecompress(inFolder, outFolder): # shutil.copytree(asfolder(inFolder), asfolder(outFolder)) # print('copying files') # 
execute("gunzip %s*.gz"%asfolder(outFolder)) # print('decompress files') def primertrim(infqfile, outfqfile, length): """ :param infqfile: :param outfqfile: :param length: :return: """ if infqfile.endswith(".gz"): infq = gzip.open(infqfile, "r") else: infq = open(infqfile, "r") if outfqfile.endswith(".gz"): outfq = gzip.open(outfqfile, "w") else: outfq = open(outfqfile, "w") lines = infq.readlines() for a, b, c, d in zip(lines[0::4], lines[1::4], lines[2::4], lines[3::4]): outfq.write(a) outfq.write(b[length:]) outfq.write(c) outfq.write(d[length:]) infq.close() outfq.close() def trimfolder(inFolder, outFolder, trimq, ftrim=True): """ """ import os inFolder = asfolder(inFolder) outFolder = asfolder(outFolder) files = os.listdir(inFolder) files.sort() ins1 = [x for x in files if "_R1_" in x] ins2 = [x.replace("_R1_", "_R2_") for x in ins1] os.mkdir(outFolder) # call("mkdir -p %s" % out_folder, shell=True) print("Trimming...") # get_ipython().system(u'mkdir -p {out_folder}') def process(i): in1 = inFolder + ins1[i] in2 = inFolder + ins2[i] print("\n%s and %s" % (ins1[i], ins2[i])) out1 = outFolder + ins1[i] out2 = outFolder + ins2[i] out1_temp1 = outFolder + "temp1_" + ins1[i] out2_temp1 = outFolder + "temp1_" + ins2[i] # forctrimleft was added if ftrim: primertrim(in1, out1_temp1, PR['primertrim_forward']) primertrim(in2, out2_temp1, PR['primertrim_reverse']) else: out1_temp1 = in1 out2_temp1 = in2 if PR['adapter_ref'] != None: execute( "bbduk.sh -Xmx1000m -in1=%s -in2=%s -out1=%s -out2=%s -outm=stdout.fa -ref=%s -qtrim=r -trimq=%d -k=18 -ktrim=f" % (out1_temp1, out2_temp1, out1, out2, PR['adapter_ref'], trimq), shell=True) else: execute( "bbduk.sh -Xmx1000m -in1=%s -in2=%s -out1=%s -out2=%s -qtrim=r -trimq=%d" % (out1_temp1, out2_temp1, out1, out2, trimq), shell=True) os.remove(out1_temp1) os.remove(out2_temp1) p = Pool(PR['number_of_cores']) p.map(process, range(len(ins1))) def mergefolderbb(inFolder, outFolder, maxloose=True): """ """ inFolder = 
asfolder(inFolder) outFolder = asfolder(outFolder) import os files = os.listdir(inFolder) files.sort() ins1 = [x for x in files if "_R1_" in x] ins2 = [x.replace("_R1_", "_R2_") for x in ins1] outs = [x.replace("_L001_R1_001", "") for x in ins1] os.mkdir(outFolder) print("\nMerging ...") def process(i): in1 = inFolder + ins1[i] in2 = inFolder + ins2[i] print("%s and %s" % (ins1[i], ins2[i])) out = outFolder + outs[i] if maxloose: execute("bbmerge.sh -in1=%s -in2=%s -out=%s -maxloose=t -ignorebadquality" % (in1, in2, out), shell=True) else: execute("bbmerge.sh -in1=%s -in2=%s -out=%s -ignorebadquality" % (in1, in2, out), shell=True) if PR['remove_intermediate']: os.remove(in1) os.remove(in2) p = Pool(PR['number_of_cores']) p.map(process, range(len(ins1))) if PR['remove_intermediate']: os.removedirs(inFolder) print("Merging finished.") def mergefolder(inFolder, outFolder, pp): """ """ global PR inFolder = asfolder(inFolder) outFolder = asfolder(outFolder) files = os.listdir(inFolder) files.sort() ins1 = [x for x in files if "_R1_" in x] ins2 = [x.replace("_R1_", "_R2_") for x in ins1] outs = [x.replace("_L001_R1_001", "") for x in ins1] os.mkdir(outFolder) def process(i): in1 = inFolder + ins1[i] in2 = inFolder + ins2[i] print("Merging: %s and %s " % (ins1[i], ins2[i])) out = outFolder + "temp_" + outs[i] out_final = outFolder + outs[i] if out_final.endswith(".gz"): out_final = sub(".gz", "", out_final) execute("fastq-join -p %d %s %s -o %s" % (pp, in1, in2, out), shell=True) os.remove("%sun1" % out) os.remove("%sun2" % out) os.rename("%sjoin" % out, out) remove_short_reads(out, out_final, PR['minimum_length']) os.remove(out) if PR['remove_intermediate']: os.remove(in1) os.remove(in2) p = Pool(PR['number_of_cores']) p.map(process, range(len(ins1))) if PR['remove_intermediate']: os.removedirs(inFolder) def qualitycontrol(inFolder, outFolder, q): """ """ inFolder = asfolder(inFolder) outFolder = asfolder(outFolder) import os files = os.listdir(inFolder) files.sort() 
os.mkdir(outFolder) # call("mkdir -p %s " % out_folder, shell=True) def process(i): temp = outFolder + "temp" + i + "/" print("\nQuality control: %s" % i) sampleId = i.replace(".fastq", "") inFile = inFolder + i outFile = outFolder + i.replace(".fastq", ".fasta") execute("""split_libraries_fastq.py -i %s -o %s --barcode_type not-barcoded --sample_ids %s -q %s""" % ( inFile, temp, sampleId, q), shell=True) tempFile = temp + "seqs.fna" call("mv %s %s" % (tempFile, outFile), shell=True) call("rm -r %s" % temp, shell=True) if PR['remove_intermediate']: os.remove(inFile) p = Pool(PR['number_of_cores']) p.map(process, files) print("Quality control finished.") if PR['remove_intermediate']: os.removedirs(inFolder) def removechimera(inFolder, outFolder, rdb="silva"): """ """ global PR import os inFolder = asfolder(inFolder) outFolder = asfolder(outFolder) files = os.listdir(inFolder) files.sort() os.mkdir(outFolder) # call("mkdir -p %s" % out_folder, shell=True) def process(i): print("Chimera removal: %s" % i) temp = outFolder + "temp" + i + "/" if rdb == "silva": execute("identify_chimeric_seqs.py -i %s -m usearch61 -o %s -r %s" % (inFolder + i, temp + i, PR['silva_chim_ref']), shell=True) else: execute("identify_chimeric_seqs.py -i %s -m usearch61 -o %s -r %s" % ( inFolder + i, temp + i, PR['gg_chim_ref']), shell=True) execute("filter_fasta.py -f %s -o %s -s %s/non_chimeras.txt" % (inFolder + i, outFolder + i, temp + i), shell=True) call("rm -r %s" % temp, shell=True) if PR['remove_intermediate']: os.remove(inFolder+i) p = Pool(PR['number_of_cores']) p.map(process, files) if PR['remove_intermediate']: os.removedirs(inFolder) def pickotus(inFolder, outFolder, rdb="silva", fungus=False): """ """ # TODO : add no parallel option global PR inFolder = asfolder(inFolder) outFolder = asfolder(outFolder) inFolder_fasta = inFolder + "*.fasta" print("Otu picking...") if PR['np']: parallel_string = "" else: parallel_string = "-a -O %d" % PR['number_of_cores'] if PR['c_ref'] != 
"none": if rdb == "silva": execute("pick_open_reference_otus.py -i %s -o %s -p %s -r %s %s -n %s" % ( inFolder_fasta, outFolder, PR['parameter_file_name'], PR['c_ref'], parallel_string, PR['c_otu_id']), shell=True) #execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s" # % (out_folder + "otu_table_mc2_w_tax_no_pynast_failures.biom", # out_folder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom", # PR['silva_reference_seqs']), shell=True) elif fungus: execute("pick_open_reference_otus.py -i %s -o %s -p %s %s -n %s --suppress_align_and_tree" % (inFolder_fasta, outFolder, PR['parameter_file_name'], parallel_string, PR['c_otu_id']), shell=True) else: execute("pick_open_reference_otus.py -i %s -o %s -r %s -p %s %s -n %s" % (inFolder_fasta, outFolder, PR['c_ref'], PR['parameter_file_name'], parallel_string, PR['c_otu_id']), shell=True) #execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s" # % (out_folder + "otu_table_mc2_w_tax_no_pynast_failures.biom", # out_folder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom", # PR['gg_reference_seqs']), shell=True) else: if rdb == "silva": execute("pick_open_reference_otus.py -i %s -o %s -p %s -r %s %s -n %s" % (inFolder_fasta, outFolder, PR['parameter_file_name'], PR['silva_reference_seqs'], parallel_string, PR['c_otu_id']), shell=True) execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s" % (outFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom", outFolder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom", PR['silva_reference_seqs']), shell=True) elif fungus: execute("pick_open_reference_otus.py -i %s -o %s -p %s %s -n %s--suppress_align_and_tree" % (inFolder_fasta, outFolder, PR['parameter_file_name'], parallel_string, PR['c_otu_id']), shell=True) else: execute("pick_open_reference_otus.py -i %s -o %s -r %s -p %s -n %s" % (inFolder_fasta, outFolder, PR['gg_reference_seqs'], PR['parameter_file_name'], 
parallel_string, PR['c_otu_id']), shell=True) execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s" % (outFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom", outFolder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom", PR['gg_reference_seqs']), shell=True) if PR['remove_intermediate']: os.removedirs(inFolder) def writedf(outFile, ids, sampleIds): f = open(outFile, "w+") f.write("#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tRead\tFile\tDescription\n") for x in range(len(ids)): f.write("%s\t\t\tR1\t%s\tsingle_file\n" % (ids[x], sampleIds[x])) f.close() def create_map(inFolder, outFile): """ """ inFolder = asfolder(inFolder) print("Writing mapping file") import os sampleIds = os.listdir(inFolder) ids = [x.replace(".fasta", "") for x in sampleIds] ids = [x.split("_")[0] for x in ids] d = {'#SampleID': ids} writedf(outFile, ids, sampleIds) def corediv(inFolder, outFolder, mappingFile, depth): """ """ print("Core diversity analyses...") inFolder = asfolder(inFolder) outFolder = asfolder(outFolder) if PR['fungus']: biom = inFolder + "otu_table_mc2_w_tax.biom" else: biom = inFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom" tree = inFolder + "rep_set.tre" # get_ipython().system( # u'core_diversity_analyses.py -i {biom} -o {out_folder} -m {mapping_file} -t {tree} -e {depth}') if PR['fungus']: execute("core_diversity_analyses.py -i %s -o %s -m %s -e %d --nonphylogenetic_diversity" % ( biom, outFolder, mappingFile, depth), shell=True) else: execute( "core_diversity_analyses.py -i %s -o %s -m %s -t %s -e %d" % (biom, outFolder, mappingFile, tree, depth), shell=True) def full_analysis(inFolder, outFolder, depth, rdb, trimq, joining_method, qcq, maxloose, fastq_p): global PR """ """ trimmed = asfolder(outFolder + PR['Ftrimmed']) merged = asfolder(outFolder + PR['Fmerged']) qc = asfolder(outFolder + PR['Fqc']) chi = asfolder(outFolder + PR['Fchi']) otus = asfolder(outFolder + PR['Fotus']) div = asfolder(outFolder + 
PR['Fdiv']) trimfolder(inFolder, trimmed, trimq) if joining_method == "fastq-join": mergefolderfastq(trimmed, merged, fastq_p) elif joining_method == "bbmerge": mergefolderbb(trimmed, merged, maxloose=maxloose) else: raise ("Wrong method") qualitycontrol(merged, qc, qcq) removechimera(qc, chi, rdb) pickotus(chi, otus, rdb) # here if create_mapping_file: create_map(qc, PR['mapping_file']) corediv(otus, div, PR['mapping_file'], depth) def stop_at_merging(inFolder, outFolder, trimq, joining_method, maxloose, fastq_p): global PR trimmed = asfolder(outFolder + PR['Ftrimmed']) merged = asfolder(outFolder) + PR['Fmerged'] trimfolder(inFolder, trimmed, trimq) if joining_method == "fastq-join": mergefolderfastq(trimmed, merged, fastq_p) elif joining_method == "bbmerge": mergefolderbb(trimmed, merged, maxloose=maxloose) else: raise ("%s: unknown merging metod method" % joining_method) def stop_at_quality_control(inFolder, outFolder, joining_method, trimq, qcq, maxloose, fastq_p): global PR """ """ trimmed = asfolder(outFolder + PR['Ftrimmed']) merged = asfolder(outFolder + PR['Fmerged']) qc = asfolder(outFolder + PR['Fqc']) trimfolder(inFolder, trimmed, trimq) if joining_method == "fastq-join": mergefolderfastq(trimmed, merged, fastq_p) elif joining_method == "bbmerge": mergefolderbb(trimmed, merged, maxloose=maxloose) else: raise ("%s: unknown merging metod method" % joining_method) qualitycontrol(merged, qc, qcq) def stop_at_chimera_removal(inFolder, outFolder, rdb, trimq, joining_method, qcq, maxloose, fastq_p): """ """ global PR trimmed = asfolder(outFolder + PR['Ftrimmed']) merged = asfolder(outFolder + PR['Fmerged']) qc = asfolder(outFolder + PR['Fqc']) chi = asfolder(outFolder + PR['Fchi']) trimfolder(inFolder, trimmed, trimq) if joining_method == "fastq-join": mergefolderfastq(trimmed, merged, fastq_p) elif joining_method == "bbmerge": mergefolderbb(trimmed, merged, maxloose=maxloose) else: raise ("%s: unknown merging metod method" % joining_method) 
qualitycontrol(merged, qc, qcq) removechimera(qc, chi, rdb) def start_at_chimera_removal(inFolder, outFolder, rdb, depth): global PR qc = asfolder(inFolder) chi = asfolder(outFolder + PR['Fchi']) otus = asfolder(outFolder + PR['Fotus']) div = asfolder(outFolder + PR['Fdiv']) removechimera(qc, chi, rdb) pickotus(chi, otus, rdb) # here if create_mapping_file: create_map(qc, PR['mapping_file']) corediv(otus, div, PR['mapping_file'], depth) def start_otu_pickng(inFolder, outFolder, depth, rdb): """ """ global PR chi = asfolder(inFolder) otus = asfolder(outFolder + PR['Fotus']) div = asfolder(outFolder + PR['Fdiv']) pickotus(chi, otus, rdb) if create_mapping_file: create_map(chi, PR['mapping_file']) corediv(otus, div, PR['mapping_file'], depth) def start_diversity_analysis(inFolder, outFolder, mapping_file, depth): otus = asfolder(inFolder) div = asfolder(outFolder + PR['Fdiv']) corediv(inFolder=otus, outFolder=div, mappingFile=mapping_file, depth=depth) if __name__ == "__main__": parser = argparse.ArgumentParser(description="""Microbiome analysis using multiple methods Version: %s Date: %s """ % (__version__, __date__)) parser.add_argument("-i", # "--input", dest="input", # type=str, help="the input sequences filepath (fastq files) [REQUIRED]", metavar="Input folder", required=True) parser.add_argument("-o", # "--output", dest="output", type=str, metavar="Output folder", help="the output directory [REQUIRED]", required=True) parser.add_argument("-t", dest="trim_threshold", type=int, metavar="trim_phred_threshold", help="phred quality threshold for trimming [default: 12]", default=12) parser.add_argument("-p", type=int, dest="fastq_p", metavar="fastq-join p", help="fastq-join's percentage of mismatch [default: 16]", default=16) parser.add_argument("--adapter", metavar=None, dest="adapter_reference", help="Adapters reference file", type=str) parser.add_argument("-b", dest="beginwith", type=str, metavar="starting step", choices=['otu_picking', 'diversity_analysis', 
'chimera_removal'], help="starting the analysis in the middle: (otu_picking), (diversity_analysis), (chimera_removal)") parser.add_argument("-s", dest="stop_at", type=str, metavar="stop at", choices = ['merging', 'quality_control','chimera_removal'], help='terminate the analysis at this step [choices: (merging), (quality_control), (chimera_' 'removal))') parser.add_argument("-j", dest='joining_method', help="choose the merging method (fastq-join) or (bbmerge) [default: fastq-join]", type=str, metavar="joining method", choices = ['fastq-join', "bbmerge"], default="fastq-join") parser.add_argument("-m", dest="maxloose", help="Assign maxloose to be true for bbmerge [default: False]", action="store_true") parser.add_argument("-q", dest="qc_threshold", type=int, metavar="quality control threshold", help="quality control phred threshold [default: 19]", default=19) parser.add_argument("--continuation_reference", dest="c_ref", type=str, metavar="newref_seq.fna", help="reference sequence for continuation. If you want to continue analysis using the reference " "data set from previous analysis. 
you can find it in the last sample otus folder new_refseqs.fna", default="none") parser.add_argument("--continuation_otu_id", dest="c_otu_id", type=str, metavar=None, help="continuation reference new otus ids", default="New") parser.add_argument("-r", dest="rdb", metavar="Reference database", help="silva, greengenes [default: silva]", choices=['silva', 'greengenes', 'unite'], type=str, default="silva") parser.add_argument("-c", dest="ConfigFile", type=str, metavar="Configuration file name", default='qiime.cfg', help="Configuration file name [default: qiime.cfg]") parser.add_argument("-a", dest="mapping_file", help="Mapping file name", metavar="Mapping file name", type=str) parser.add_argument("--parameter_file_name", help="The name of the parameter file [if not assigned is automatically produced using " "configuration file", type=str, metavar=None, dest="parameter_file_name") parser.add_argument("-n", # "--number_of_cores", help="Specify the number of jobs to start with [default: 2]", type=int, metavar='Number of jobs', dest="number_of_cores", default=2) parser.add_argument("-e", dest="depth", type=int, metavar="Sampling depth", help="sampling depth for diversity analyses [default: 10000]", default=10000) parser.add_argument("--remove_intermediate_files", help="To remove intermediate files, to reduce the disk space", dest="remove_intermediate", action="store_true") # parser.add_argument("--decompress", # help="Copy input files to outputfolder/fastq and decompress them", # dest="decompress", # action="store_true") parser.add_argument("--ml", dest="minimum_length", metavar='Minimum length', type=int, help="Minimum length of reads kept after merging [default: 380]", default=380) parser.add_argument("--primer-trim-f", dest="primertrim_forward", metavar='Primer Trim', type=int, help="length of the forward primer [17]", default=17) parser.add_argument("--primer-trim-r", dest="primertrim_reverse", metavar='Primer Trim', type=int, help="length of the reverse primer [21]", 
default=21) #x = parser.format_usage() #parser.usage = starting_message #+ x arg = parser.parse_args() PR.update({ 'in_folder': asfolder(arg.input), 'out_folder': asfolder(arg.output), # 'decompress': arg.aaa # ress, 'rdb': arg.rdb, 'qcq': arg.qc_threshold, 'maxloose': arg.maxloose, 'trimq': arg.trim_threshold, 'joining_method': arg.joining_method, 'fastq_p': arg.fastq_p, 'depth': arg.depth, 'ConfigFile': arg.ConfigFile, 'parameter_file_name': arg.parameter_file_name, 'remove_intermediate': arg.remove_intermediate, 'beginwith': arg.beginwith, 'mapping_file': arg.mapping_file, 'adapter_ref': arg.adapter_reference, 'minimum_length': arg.minimum_length, 'c_ref': arg.c_ref, 'c_otu_id': arg.c_otu_id, 'primertrim_forward': arg.primertrim_forward, 'primertrim_reverse': arg.primertrim_reverse}) ## parameter_file get_configuration() check_before_start() if PR['rdb'] == 'unite': PR['fungus'] = True else: PR['fungus'] = False PR['others'] = asfolder(PR['out_folder'] + PR['Fothers']) PR['number_of_cores'] = arg.number_of_cores if PR['number_of_cores'] == 1: PR['np'] = True else: PR['np'] = False if (os.path.isdir(PR['out_folder'])): sys.exit() else: os.mkdir(PR['out_folder']) if not os.path.isdir(PR['others']): os.mkdir(PR['others']) logging.basicConfig(filename=PR['others'] + "log.txt", format='%(levelname)s \n %(message)s', level=logging.DEBUG) loginfo('started') [loginfo(str(P) + ": " + str(PR[P])) for P in PR] # if PR['decompress']: # copyfilesanddecompress(PR['in_folder'], asfolder(PR['out_folder']+"fastq")) # PR['in_folder'] = asfolder(PR['out_folder'])+'fastq/' if arg.parameter_file_name == None: PR['parameter_file_name'] = PR['others'] + "para%s.txt" % PR['id'] write_parameter_file(PR['parameter_file_name']) if arg.mapping_file == None: create_mapping_file = True PR['mapping_file'] = PR['others'] + "map.tsv" else: PR['mapping_file'] = arg.mapping_file if (arg.beginwith == "diversity_analysis") and (arg.mapping_file == None): pass number_of_cores = PR['number_of_cores'] 
if arg.beginwith == "otu_picking": start_otu_pickng(inFolder=PR['in_folder'], outFolder=PR['out_folder'], rdb=PR['rdb'], depth=PR['depth']) elif arg.beginwith == "diversity_analysis": start_diversity_analysis(inFolder=PR['in_folder'], outFolder=PR['out_folder'], mapping_file=PR['mapping_file'], depth=PR['depth']) elif arg.beginwith == "chimera_removal": start_at_chimera_removal(inFolder=PR['in_folder'], outFolder=PR['out_folder'], rdb= PR['rdb'], depth=PR['depth']) elif arg.stop_at == "chimera_removal": stop_at_chimera_removal(inFolder=PR['in_folder'], outFolder=PR['out_folder'], rdb=PR['rdb'], joining_method=PR['joining_method'], fastq_p=PR['fastq_p'], maxloose=PR['maxloose'], qcq=PR['qcq'], trimq=PR['trimq']) elif arg.stop_at == "merging": stop_at_merging(inFolder=PR['in_folder'], outFolder=PR['out_folder'], joining_method=PR['joining_method'], fastq_p=PR['fastq_p'], maxloose=PR['maxloose'], trimq=PR['trimq']) elif arg.stop_at == "quality_control": stop_at_quality_control(inFolder=PR['in_folder'], outFolder=PR['out_folder'], joining_method=PR['joining_method'], fastq_p=PR['fastq_p'], maxloose=PR['maxloose'], qcq=PR['qcq'], trimq=PR['trimq']) else: full_analysis(inFolder=PR['in_folder'], outFolder=PR['out_folder'], rdb=PR['rdb'], joining_method=PR['joining_method'], fastq_p=PR['fastq_p'], maxloose=PR['maxloose'], qcq=PR['qcq'], depth=PR['depth'], trimq=PR['trimq']) loginfo("Finished")
Attayeb/qanaus
auto-q.py
Python
bsd-3-clause
36,108
[ "BLAST" ]
42a546e4fa444cb24273fb0e725bbabedb47529076602b86d09411b51461e322
import os import time from simtk.openmm import app import simtk.openmm as mm from simtk import unit as u import mdtraj.reporters import sys code = "3DMV" ff_name = "amber99sbnmr" water_name = 'tip3p' which_forcefield = "%s.xml" % ff_name which_water = '%s.xml' % water_name platform_name = "CUDA" timestep = 2.0 * u.femtoseconds cutoff = 0.95 * u.nanometers output_frequency = 500 n_steps = 2500000 temperature = 300. pressure = 1.0 * u.atmospheres rank = int(sys.argv[1]) time.sleep(rank) # This makes sure that no two jobs run at the same time for RNG purpuses. pdb_filename = "./final_boxes/%s_tip3p-fb.pdb" % code dcd_filename = "./equil_box/%s_%s.dcd" % (code, water_name) log_filename = "./equil_box/%s_%s.log" % (code, water_name) traj = mdtraj.load(pdb_filename) top, bonds = traj.top.to_dataframe() atom_indices = top.index[top.chainID == 0].values pdb = app.PDBFile(pdb_filename) topology = pdb.topology positions = pdb.positions ff = app.ForceField(which_forcefield, which_water) platform = mm.Platform.getPlatformByName(platform_name) system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds) integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, timestep) system.addForce(mm.MonteCarloBarostat(pressure, temperature, 25)) simulation = app.Simulation(topology, system, integrator, platform=platform) simulation.context.setPositions(positions) simulation.context.setVelocitiesToTemperature(temperature) print("Using platform %s" % simulation.context.getPlatform().getName()) if os.path.exists(dcd_filename): sys.exit() simulation.reporters.append(mdtraj.reporters.DCDReporter(dcd_filename, output_frequency)) simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), output_frequency, step=True, time=True, speed=True)) simulation.step(n_steps)
kyleabeauchamp/fah-projects
old/equilibrate_boxes.py
Python
gpl-2.0
1,862
[ "MDTraj", "OpenMM" ]
d50b7e8e6c824cd2fcb3d131d7621fc3515f0e42d804fb450126f71d518e0a14
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com> # Stephen Fromm <sfromm@gmail.com> # Brian Coca <briancoca+dev@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import os.path import pipes import shutil import tempfile import base64 import re from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean from ansible.utils.hashing import checksum_s class ActionModule(ActionBase): TRANSFERS_FILES = True def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') delimit_me = False add_newline = False for f in sorted(os.listdir(src_path)): if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) if not os.path.isfile(fragment): continue fragment_content = file(fragment).read() # always put a newline between fragments if the previous fragment didn't end with a newline. 
if add_newline: tmp.write('\n') # delimiters should only appear between fragments if delimit_me: if delimiter: # un-escape anything like newlines delimiter = delimiter.decode('unicode-escape') tmp.write(delimiter) # always make sure there's a newline after the # delimiter, so lines don't run together if delimiter[-1] != '\n': tmp.write('\n') tmp.write(fragment_content) delimit_me = True if fragment_content.endswith('\n'): add_newline = False else: add_newline = True tmp.close() return temp_path def run(self, tmp=None, task_vars=dict()): src = self._task.args.get('src', None) dest = self._task.args.get('dest', None) delimiter = self._task.args.get('delimiter', None) remote_src = self._task.args.get('remote_src', 'yes') regexp = self._task.args.get('regexp', None) if src is None or dest is None: return dict(failed=True, msg="src and dest are required") if boolean(remote_src): return self._execute_module(tmp=tmp, task_vars=task_vars) elif self._task._role is not None: src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src) else: # the source is local, so expand it here src = self._loader.path_dwim(os.path.expanduser(src)) _re = None if regexp is not None: _re = re.compile(regexp) # Does all work assembling the file path = self._assemble_from_fragments(src, delimiter, _re) path_checksum = checksum_s(path) dest = self._remote_expand_user(dest, tmp) remote_checksum = self._remote_checksum(tmp, dest) if path_checksum != remote_checksum: resultant = file(path).read() # FIXME: diff needs to be moved somewhere else #if self.runner.diff: # dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), task_vars=task_vars, tmp=tmp, persist_files=True) # if 'content' in dest_result: # dest_contents = dest_result['content'] # if dest_result['encoding'] == 'base64': # dest_contents = base64.b64decode(dest_contents) # else: # raise Exception("unknown encoding, failed: %s" % dest_result) xfered = self._transfer_data('src', resultant) # 
fix file permissions when the copy is done as a different user if self._connection_info.become and self._connection_info.become_user != 'root': self._remote_chmod('a+r', xfered, tmp) # run the copy module new_module_args = self._task.args.copy() new_module_args.update( dict( src=xfered, dest=dest, original_basename=os.path.basename(src), ) ) res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp) # FIXME: diff stuff #res.diff = dict(after=resultant) return res else: new_module_args = self._task.args.copy() new_module_args.update( dict( src=xfered, dest=dest, original_basename=os.path.basename(src), ) ) return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp)
erinn/ansible
lib/ansible/plugins/action/assemble.py
Python
gpl-3.0
5,710
[ "Brian" ]
31dd594c8741ec8931c058ab7ad923a1092a18a5f29d1e4289909095f59fefa6
""" Internal implementation of a SAT solver, used by L{solver.SATSolver}. This is not part of the public API. """ # Copyright (C) 2010, Thomas Leonard # See the README file for details, or visit http://0install.net. # The design of this solver is very heavily based on the one described in # the MiniSat paper "An Extensible SAT-solver [extended version 1.2]" # http://minisat.se/Papers.html # # The main differences are: # # - We care about which solution we find (not just "satisfiable" or "not"). # - We take care to be deterministic (always select the same versions given # the same input). We do not do random restarts, etc. # - We add an AtMostOneClause (the paper suggests this in the Excercises, and # it's very useful for our purposes). def debug(msg, *args): return print "SAT:", msg % args # variables are numbered from 0 # literals have the same number as the corresponding variable, # except they for negatives they are (-1-v): # # Variable Literal not(Literal) # 0 0 -1 # 1 1 -2 def neg(lit): return -1 - lit def watch_index(lit): if lit >= 0: return lit * 2 return neg(lit) * 2 + 1 def makeAtMostOneClause(solver): class AtMostOneClause: def __init__(self, lits): """Preferred literals come first.""" self.lits = lits # The single literal from our set that is True. # We store this explicitly because the decider needs to know quickly. self.current = None def propagate(self, lit): # Re-add ourselves to the watch list. # (we we won't get any more notifications unless we backtrack, # in which case we'd need to get back on the list anyway) solver.watch_lit(lit, self) # value[lit] has just become True assert solver.lit_value(lit) == True assert lit >= 0 #debug("%s: noticed %s has become True" % (self, solver.name_lit(lit))) # If we already propagated successfully when the first # one was set then we set all the others to False and # anyone trying to set one True will get rejected. 
And # if we didn't propagate yet, current will still be # None, even if we now have a conflict (which we'll # detect below). assert self.current is None self.current = lit # If we later backtrace, call our undo function to unset current solver.get_varinfo_for_lit(lit).undo.append(self) for l in self.lits: value = solver.lit_value(l) #debug("Value of %s is %s" % (solver.name_lit(l), value)) if value is True and l is not lit: # Due to queuing, we might get called with current = None # and two versions already selected. debug("CONFLICT: already selected %s" % solver.name_lit(l)) return False if value is None: # Since one of our lits is already true, all unknown ones # can be set to False. if not solver.enqueue(neg(l), self): debug("CONFLICT: enqueue failed for %s", solver.name_lit(neg(l))) return False # Conflict; abort return True def undo(self, lit): debug("(backtracking: no longer selected %s)" % solver.name_lit(lit)) assert lit == self.current self.current = None # Why is lit True? # Or, why are we causing a conflict (if lit is None)? def cacl_reason(self, lit): if lit is None: # Find two True literals trues = [] for l in self.lits: if solver.lit_value(l) is True: trues.append(l) if len(trues) == 2: return trues else: for l in self.lits: if l is not lit and solver.lit_value(l) is True: return [l] # Find one True literal assert 0 # don't know why! def best_undecided(self): debug("best_undecided: %s" % (solver.name_lits(self.lits))) for lit in self.lits: #debug("%s = %s" % (solver.name_lit(lit), solver.lit_value(lit))) if solver.lit_value(lit) is None: return lit return None def __repr__(self): return "<lone: %s>" % (', '.join(solver.name_lits(self.lits))) return AtMostOneClause def makeUnionClause(solver): class UnionClause: def __init__(self, lits): self.lits = lits # Try to infer new facts. # We can do this only when all of our literals are False except one, # which is undecided. That is, # False... or X or False... 
= True => X = True # # To get notified when this happens, we tell the solver to # watch two of our undecided literals. Watching two undecided # literals is sufficient. When one changes we check the state # again. If we still have two or more undecided then we switch # to watching them, otherwise we propagate. # # Returns False on conflict. def propagate(self, lit): # value[get(lit)] has just become False #debug("%s: noticed %s has become False" % (self, solver.name_lit(neg(lit)))) # For simplicity, only handle the case where self.lits[1] # is the one that just got set to False, so that: # - value[lits[0]] = None | True # - value[lits[1]] = False # If it's the other way around, just swap them before we start. if self.lits[0] == neg(lit): self.lits[0], self.lits[1] = self.lits[1], self.lits[0] if solver.lit_value(self.lits[0]) == True: # We're already satisfied. Do nothing. solver.watch_lit(lit, self) return True assert solver.lit_value(self.lits[1]) == False # Find a new literal to watch now that lits[1] is resolved, # swap it with lits[1], and start watching it. for i in range(2, len(self.lits)): value = solver.lit_value(self.lits[i]) if value != False: # Could be None or True. If it's True then we've already done our job, # so this means we don't get notified unless we backtrack, which is fine. self.lits[1], self.lits[i] = self.lits[i], self.lits[1] solver.watch_lit(neg(self.lits[1]), self) return True # Only lits[0], is now undefined. solver.watch_lit(lit, self) return solver.enqueue(self.lits[0], self) def undo(self, lit): pass # Why is lit True? # Or, why are we causing a conflict (if lit is None)? def cacl_reason(self, lit): assert lit is None or lit is self.lits[0] # The cause is everything except lit. 
return [neg(l) for l in self.lits if l is not lit] def __repr__(self): return "<some: %s>" % (', '.join(solver.name_lits(self.lits))) return UnionClause # Using an array of VarInfo objects is less efficient than using multiple arrays, but # easier for me to understand. class VarInfo(object): __slots__ = ['value', 'reason', 'level', 'undo', 'obj'] def __init__(self, obj): self.value = None # True/False/None self.reason = None # The constraint that implied our value, if True or False self.level = -1 # The decision level at which we got a value (when not None) self.undo = [] # Constraints to update if we become unbound (by backtracking) self.obj = obj # The object this corresponds to (for our caller and for debugging) def __repr__(self): return '%s=%s' % (self.name, self.value) @property def name(self): return str(self.obj) class SATProblem(object): def __init__(self): # Propagation self.watches = [] # watches[2i,2i+1] = constraints to check when literal[i] becomes True/False self.propQ = [] # propagation queue # Assignments self.assigns = [] # [VarInfo] self.trail = [] # order of assignments self.trail_lim = [] # decision levels self.toplevel_conflict = False self.makeAtMostOneClause = makeAtMostOneClause(self) self.makeUnionClause = makeUnionClause(self) def get_decision_level(self): return len(self.trail_lim) def add_variable(self, obj): debug("add_variable('%s')", obj) index = len(self.assigns) self.watches += [[], []] # Add watch lists for X and not(X) self.assigns.append(VarInfo(obj)) return index # lit is now True # reason is the clause that is asserting this # Returns False if this immediately causes a conflict. 
def enqueue(self, lit, reason): debug("%s => %s" % (reason, self.name_lit(lit))) old_value = self.lit_value(lit) if old_value is not None: if old_value is False: # Conflict return False else: # Already set (shouldn't happen) return True if lit < 0: var_info = self.assigns[neg(lit)] var_info.value = False else: var_info = self.assigns[lit] var_info.value = True var_info.level = self.get_decision_level() var_info.reason = reason self.trail.append(lit) self.propQ.append(lit) return True # Pop most recent assignment from self.trail def undo_one(self): lit = self.trail[-1] debug("(pop %s)", self.name_lit(lit)) var_info = self.get_varinfo_for_lit(lit) var_info.value = None var_info.reason = None var_info.level = -1 self.trail.pop() while var_info.undo: var_info.undo.pop().undo(lit) def cancel(self): n_this_level = len(self.trail) - self.trail_lim[-1] debug("backtracking from level %d (%d assignments)" % (self.get_decision_level(), n_this_level)) while n_this_level != 0: self.undo_one() n_this_level -= 1 self.trail_lim.pop() def cancel_until(self, level): while self.get_decision_level() > level: self.cancel() # Process the propQ. # Returns None when done, or the clause that caused a conflict. def propagate(self): #debug("propagate: queue length = %d", len(self.propQ)) while self.propQ: lit = self.propQ[0] del self.propQ[0] wi = watch_index(lit) watches = self.watches[wi] self.watches[wi] = [] debug("%s -> True : watches: %s" % (self.name_lit(lit), watches)) # Notifiy all watchers for i in range(len(watches)): clause = watches[i] if not clause.propagate(lit): # Conflict # Re-add remaining watches self.watches[wi] += watches[i+1:] # No point processing the rest of the queue as # we'll have to backtrack now. 
self.propQ = [] return clause return None def impossible(self): self.toplevel_conflict = True def get_varinfo_for_lit(self, lit): if lit >= 0: return self.assigns[lit] else: return self.assigns[neg(lit)] def lit_value(self, lit): if lit >= 0: value = self.assigns[lit].value return value else: v = -1 - lit value = self.assigns[v].value if value is None: return None else: return not value # Call cb when lit becomes True def watch_lit(self, lit, cb): #debug("%s is watching for %s to become True" % (cb, self.name_lit(lit))) self.watches[watch_index(lit)].append(cb) # Returns the new clause if one was added, True if none was added # because this clause is trivially True, or False if the clause is # False. def _add_clause(self, lits, learnt): if not lits: assert not learnt self.toplevel_conflict = True return False elif len(lits) == 1: # A clause with only a single literal is represented # as an assignment rather than as a clause. if learnt: reason = "learnt" else: reason = "top-level" return self.enqueue(lits[0], reason) clause = self.makeUnionClause(lits) clause.learnt = learnt if learnt: # lits[0] is None because we just backtracked. # Start watching the next literal that we will # backtrack over. best_level = -1 best_i = 1 for i in range(1, len(lits)): level = self.get_varinfo_for_lit(lits[i]).level if level > best_level: best_level = level best_i = i lits[1], lits[best_i] = lits[best_i], lits[1] # Watch the first two literals in the clause (both must be # undefined at this point). for lit in lits[:2]: self.watch_lit(neg(lit), clause) return clause def name_lits(self, lst): return [self.name_lit(l) for l in lst] # For nicer debug messages def name_lit(self, lit): if lit >= 0: return self.assigns[lit].name return "not(%s)" % self.assigns[neg(lit)].name def add_clause(self, lits): # Public interface. Only used before the solve starts. 
assert lits debug("add_clause([%s])" % ', '.join(self.name_lits(lits))) if any(self.lit_value(l) == True for l in lits): # Trivially true already. return True lit_set = set(lits) for l in lits: if neg(l) in lit_set: # X or not(X) is always True. return True # Remove duplicates and values known to be False lits = [l for l in lit_set if self.lit_value(l) != False] retval = self._add_clause(lits, learnt = False) if not retval: self.toplevel_conflict = True return retval def at_most_one(self, lits): assert lits debug("at_most_one(%s)" % ', '.join(self.name_lits(lits))) # If we have zero or one literals then we're trivially true # and not really needed for the solve. However, Zero Install # monitors these objects to find out what was selected, so # keep even trivial ones around for that. # #if len(lits) < 2: # return True # Trivially true # Ensure no duplicates assert len(set(lits)) == len(lits), lits # Ignore any literals already known to be False. # If any are True then they're enqueued and we'll process them # soon. lits = [l for l in lits if self.lit_value(l) != False] clause = self.makeAtMostOneClause(lits) for lit in lits: self.watch_lit(lit, clause) return clause def analyse(self, cause): # After trying some assignments, we've discovered a conflict. # e.g. # - we selected A then B then C # - from A, B, C we got X, Y # - we have a rule: not(A) or not(X) or not(Y) # # The simplest thing to do would be: # 1. add the rule "not(A) or not(B) or not(C)" # 2. unassign C # # Then we we'd deduce not(C) and we could try something else. # However, that would be inefficient. We want to learn a more # general rule that will help us with the rest of the problem. # # We take the clause that caused the conflict ("cause") and # ask it for its cause. In this case: # # A and X and Y => conflict # # Since X and Y followed logically from A, B, C there's no # point learning this rule; we need to know to avoid A, B, C # *before* choosing C. 
We ask the two variables deduced at the # current level (X and Y) what caused them, and work backwards. # e.g. # # X: A and C => X # Y: C => Y # # Combining these, we get the cause of the conflict in terms of # things we knew before the current decision level: # # A and X and Y => conflict # A and (A and C) and (C) => conflict # A and C => conflict # # We can then learn (record) the more general rule: # # not(A) or not(C) # # Then, in future, whenever A is selected we can remove C and # everything that depends on it from consideration. learnt = [None] # The general rule we're learning btlevel = 0 # The deepest decision in learnt p = None # The literal we want to expand now seen = set() # The variables involved in the conflict counter = 0 while True: # cause is the reason why p is True (i.e. it enqueued it). # The first time, p is None, which requests the reason # why it is conflicting. if p is None: debug("Why did %s make us fail?" % cause) p_reason = cause.cacl_reason(p) debug("Because: %s => conflict" % (' and '.join(self.name_lits(p_reason)))) else: debug("Why did %s lead to %s?" % (cause, self.name_lit(p))) p_reason = cause.cacl_reason(p) debug("Because: %s => %s" % (' and '.join(self.name_lits(p_reason)), self.name_lit(p))) # p_reason is in the form (A and B and ...) # p_reason => p # Check each of the variables in p_reason that we haven't # already considered: # - if the variable was assigned at the current level, # mark it for expansion # - otherwise, add it to learnt for lit in p_reason: var_info = self.get_varinfo_for_lit(lit) if var_info not in seen: seen.add(var_info) if var_info.level == self.get_decision_level(): # We deduced this var since the last decision. # It must be in self.trail, so we'll get to it # soon. Remember not to stop until we've processed it. counter += 1 elif var_info.level > 0: # We won't expand lit, just remember it. 
# (we could expand it if it's not a decision, but # apparently not doing so is useful) learnt.append(neg(lit)) btlevel = max(btlevel, var_info.level) # else we already considered the cause of this assignment # At this point, counter is the number of assigned # variables in self.trail at the current decision level that # we've seen. That is, the number left to process. Pop # the next one off self.trail (as well as any unrelated # variables before it; everything up to the previous # decision has to go anyway). # On the first time round the loop, we must find the # conflict depends on at least one assignment at the # current level. Otherwise, simply setting the decision # variable caused a clause to conflict, in which case # the clause should have asserted not(decision-variable) # before we ever made the decision. # On later times round the loop, counter was already > # 0 before we started iterating over p_reason. assert counter > 0 while True: p = self.trail[-1] var_info = self.get_varinfo_for_lit(p) cause = var_info.reason self.undo_one() if var_info in seen: break debug("(irrelevant)") counter -= 1 if counter <= 0: assert counter == 0 # If counter = 0 then we still have one more # literal (p) at the current level that we # could expand. However, apparently it's best # to leave this unprocessed (says the minisat # paper). break # p is the literal we decided to stop processing on. It's either # a derived variable at the current level, or the decision that # led to this level. Since we're not going to expand it, add it # directly to the learnt clause. learnt[0] = neg(p) debug("Learnt: %s" % (' or '.join(self.name_lits(learnt)))) return learnt, btlevel def run_solver(self, decide): # Check whether we detected a trivial problem # during setup. if self.toplevel_conflict: debug("FAIL: toplevel_conflict before starting solve!") return False while True: # Use logical deduction to simplify the clauses # and assign literals where there is only one possibility. 
conflicting_clause = self.propagate() if not conflicting_clause: debug("new state: %s", self.assigns) if all(info.value != None for info in self.assigns): # Everything is assigned without conflicts debug("SUCCESS!") return True else: # Pick a variable and try assigning it one way. # If it leads to a conflict, we'll backtrack and # try it the other way. lit = decide() #print "TRYING:", self.name_lit(lit) assert lit is not None, "decide function returned None!" assert self.lit_value(lit) is None self.trail_lim.append(len(self.trail)) r = self.enqueue(lit, reason = "considering") assert r is True else: if self.get_decision_level() == 0: debug("FAIL: conflict found at top level") return False else: # Figure out the root cause of this failure. learnt, backtrack_level = self.analyse(conflicting_clause) self.cancel_until(backtrack_level) c = self._add_clause(learnt, learnt = True) if c is not True: # Everything except the first literal in learnt is known to # be False, so the first must be True. e = self.enqueue(learnt[0], c) assert e is True
timdiels/zeroinstall
zeroinstall/injector/sat.py
Python
lgpl-2.1
19,298
[ "VisIt" ]
4b58bf019576328f1613016aff506be138761dd19b60b8d6d7316237e8defcd8
""" Acceptance tests for course in studio """ from nose.plugins.attrib import attr from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage from common.test.acceptance.pages.studio.users import CourseTeamPage from common.test.acceptance.pages.studio.index import DashboardPage @attr(shard=2) class CourseTeamPageTest(StudioCourseTest): """ As a course author, I want to be able to add others to my team """ def _make_user(self, username): """ Registers user and returns user representation dictionary as expected by `log_in` function """ user = { 'username': username, 'email': username + "@example.com", 'password': username + '123' } AutoAuthPage( self.browser, no_login=True, username=user.get('username'), email=user.get('email'), password=user.get('password') ).visit() return user def setUp(self, is_staff=False): """ Install a course with no content using a fixture. """ super(CourseTeamPageTest, self).setUp(is_staff) self.other_user = self._make_user('other') self.dashboard_page = DashboardPage(self.browser) self.page = CourseTeamPage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) self._go_to_course_team_page() def _go_to_course_team_page(self): """ Opens Course Team page """ self.page.visit() self.page.wait_until_no_loading_indicator() def _refresh_page(self): """ Reload the page. 
""" self.page = CourseTeamPage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) self._go_to_course_team_page() def _assert_current_course(self, visible=True): """ Checks if current course is accessible to current user """ self.dashboard_page.visit() courses = self.dashboard_page.list_courses() def check_course_equality(course1, course2): """ Compares to course dictionaries using org, number and run as keys""" return ( course1['org'] == course2['display_organization'] and course1['number'] == course2['display_coursenumber'] and course1['run'] == course2['run'] ) actual_visible = any((check_course_equality(course, self.course_info) for course in courses)) self.assertEqual(actual_visible, visible) def _assert_user_present(self, user, present=True): """ Checks if specified user present on Course Team page """ if present: self.page.wait_for( lambda: user.get('username') in self.page.usernames, description="Wait for user to be present" ) else: self.page.wait_for( lambda: user.get('username') not in self.page.usernames, description="Wait for user to be absent" ) def _should_see_dialog(self, dialog_type, dialog_message): """ Asserts dialog with specified message is shown """ self.page.modal_dialog_visible(dialog_type) self.assertIn(dialog_message, self.page.modal_dialog_text(dialog_type)) def _assert_is_staff(self, user, can_manage=True): """ Checks if user have staff permissions, can be promoted and can't be demoted """ self.assertIn("staff", user.role_label.lower()) if can_manage: self.assertTrue(user.can_promote) self.assertFalse(user.can_demote) self.assertIn("Add Admin Access", user.promote_button_text) def _assert_is_admin(self, user): """ Checks if user have admin permissions, can't be promoted and can be demoted """ self.assertIn("admin", user.role_label.lower()) self.assertFalse(user.can_promote) self.assertTrue(user.can_demote) self.assertIn("Remove Admin Access", user.demote_button_text) def 
_assert_can_manage_users(self): """ Checks if current user can manage course team """ self.assertTrue(self.page.has_add_button) for user in self.page.users: self.assertTrue(user.can_promote or user.can_demote) # depending on actual user role self.assertTrue(user.can_delete) def _assert_can_not_manage_users(self): """ Checks if current user can't manage course team """ self.assertFalse(self.page.has_add_button) for user in self.page.users: self.assertFalse(user.can_promote) self.assertFalse(user.can_demote) self.assertFalse(user.can_delete) def test_admins_can_add_other_users(self): """ Scenario: Admins can add other users Given I have opened a new course in Studio And I am viewing the course team settings When I add other user to the course team And other user logs in Then he does see the course on her page """ self.page.add_user_to_course(self.other_user.get('email')) self._assert_user_present(self.other_user, present=True) self.log_in(self.other_user) self._assert_current_course(visible=True) def test_added_users_cannot_add_or_delete_other_users(self): """ Scenario: Added users cannot delete or add other users Given I have opened a new course in Studio And I am viewing the course team settings When I add other user to the course team And other user logs in And he selects the new course And he views the course team settings Then he cannot manage users """ self.page.add_user_to_course(self.other_user.get('email')) self._assert_user_present(self.other_user, present=True) self.log_in(self.other_user) self._assert_current_course(visible=True) self._go_to_course_team_page() bob = self.page.get_user(self.other_user.get('email')) self.assertTrue(bob.is_current_user) self.assertFalse(self.page.has_add_button) self._assert_can_not_manage_users() def test_admins_can_delete_other_users(self): """ Scenario: Admins can delete other users Given I have opened a new course in Studio And I am viewing the course team settings When I add other user to the course team And I delete 
other user from the course team And other user logs in Then he does not see the course on her page """ self.page.add_user_to_course(self.other_user.get('email')) self._assert_user_present(self.other_user, present=True) self.page.delete_user_from_course(self.other_user.get('email')) self._assert_user_present(self.other_user, present=False) self.log_in(self.other_user) self._assert_current_course(visible=False) def test_admins_cannot_add_users_that_do_not_exist(self): """ Scenario: Admins cannot add users that do not exist Given I have opened a new course in Studio And I am viewing the course team settings When I add "dennis" to the course team Then I should see "Could not find user by email address" somewhere on the page """ self.page.add_user_to_course("dennis@example.com") self._should_see_dialog('error', "Could not find user by email address") def test_admins_should_be_able_to_make_other_people_into_admins(self): """ Scenario: Admins should be able to make other people into admins Given I have opened a new course in Studio And I am viewing the course team settings And I add other user to the course team When I make other user a course team admin And other user logs in And he selects the new course And he views the course team settings Then other user should be marked as an admin And he can manage users """ self.page.add_user_to_course(self.other_user.get('email')) self._assert_user_present(self.other_user, present=True) other = self.page.get_user(self.other_user.get('email')) self._assert_is_staff(other) other.click_promote() self._refresh_page() self._assert_is_admin(other) self.log_in(self.other_user) self._go_to_course_team_page() other = self.page.get_user(self.other_user.get('email')) self.assertTrue(other.is_current_user) self._assert_can_manage_users() def test_admins_should_be_able_to_remove_other_admins(self): """ Scenario: Admins should be able to remove other admins Given I have opened a new course in Studio And I grant admin rights to other user Then 
he can add, delete, promote and demote users And I am viewing the course team settings When I remove admin rights from other user And other user logs in And he selects the new course And he views the course team settings Then other user should not be marked as an admin And he cannot manage users """ self.page.add_user_to_course(self.other_user.get('email')) self._assert_user_present(self.other_user, present=True) other = self.page.get_user(self.other_user.get('email')) self._assert_is_staff(other) other.click_promote() self._refresh_page() other = self.page.get_user(self.other_user.get('email')) self._assert_is_admin(other) # precondition check - frank is an admin and can add/delete/promote/demote users self.log_in(self.other_user) self._go_to_course_team_page() other = self.page.get_user(self.other_user.get('email')) self.assertTrue(other.is_current_user) self._assert_can_manage_users() self.log_in(self.user) self._go_to_course_team_page() other = self.page.get_user(self.other_user.get('email')) other.click_demote() self._refresh_page() other = self.page.get_user(self.other_user.get('email')) self._assert_is_staff(other) self.log_in(self.other_user) self._go_to_course_team_page() other = self.page.get_user(self.other_user.get('email')) self.assertTrue(other.is_current_user) self._assert_can_not_manage_users() def test_admins_should_be_able_to_remove_themself_if_other_admin_exists(self): """ Scenario: Admins should be able to give course ownership to someone else Given I have opened a new course in Studio And I am viewing the course team settings And I'm the only course admin Then I cannot delete or demote myself When I add other user to the course team And I make other user a course team admin Then I can delete or demote myself When I delete myself from the course team And I am logged into studio Then I do not see the course on my page """ self.page.add_user_to_course(self.other_user.get('email')) self._assert_user_present(self.other_user, present=True) current = 
self.page.get_user(self.user.get('email')) self.assertFalse(current.can_demote) self.assertFalse(current.can_delete) self.assertIn("Promote another member to Admin to remove your admin rights", current.no_change_warning_text) other = self.page.get_user(self.other_user.get('email')) other.click_promote() self._refresh_page() other = self.page.get_user(self.other_user.get('email')) self._assert_is_admin(other) current = self.page.get_user(self.user.get('email')) self.assertTrue(current.can_demote) self.assertTrue(current.can_delete) current.click_delete() self.log_in(self.user) self._assert_current_course(visible=False) def test_admins_should_be_able_to_give_course_ownership_to_someone_else(self): """ Scenario: Admins should be able to give course ownership to someone else Given I have opened a new course in Studio And I am viewing the course team settings When I add other user to the course team And I make other user a course team admin When I remove admin rights from myself Then I should not be marked as an admin And I cannot manage users And I cannot make myself a course team admin When other user logs in And he selects the new course And he views the course team settings And he deletes me from the course team And I am logged into studio Then I do not see the course on my page """ self.page.add_user_to_course(self.other_user.get('email')) self._assert_user_present(self.other_user, present=True) current = self.page.get_user(self.user.get('email')) self.assertFalse(current.can_demote) self.assertFalse(current.can_delete) self.assertIn("Promote another member to Admin to remove your admin rights", current.no_change_warning_text) other = self.page.get_user(self.other_user.get('email')) other.click_promote() self._refresh_page() other = self.page.get_user(self.other_user.get('email')) self._assert_is_admin(other) current = self.page.get_user(self.user.get('email')) self.assertTrue(current.can_demote) self.assertTrue(current.can_delete) current.click_demote() 
self._refresh_page() current = self.page.get_user(self.user.get('email')) self._assert_is_staff(current, can_manage=False) self._assert_can_not_manage_users() self.assertFalse(current.can_promote) self.log_in(self.other_user) self._go_to_course_team_page() current = self.page.get_user(self.user.get('email')) current.click_delete() self._refresh_page() self._assert_user_present(self.user, present=False) self.log_in(self.user) self._assert_current_course(visible=False)
louyihua/edx-platform
common/test/acceptance/tests/studio/test_studio_course_team.py
Python
agpl-3.0
14,318
[ "VisIt" ]
42e4da445423aa75c9b26340f183cb89890df0ab966599f63af38c297cdc5c08
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import mock import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import future from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import operation from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.automl_v1.services.auto_ml import AutoMlAsyncClient from google.cloud.automl_v1.services.auto_ml import AutoMlClient from google.cloud.automl_v1.services.auto_ml import pagers from google.cloud.automl_v1.services.auto_ml import transports from google.cloud.automl_v1.types import annotation_spec from google.cloud.automl_v1.types import classification from google.cloud.automl_v1.types import dataset from google.cloud.automl_v1.types import dataset as gca_dataset from google.cloud.automl_v1.types import detection from google.cloud.automl_v1.types import image from google.cloud.automl_v1.types import io from google.cloud.automl_v1.types import model from 
google.cloud.automl_v1.types import model as gca_model from google.cloud.automl_v1.types import model_evaluation from google.cloud.automl_v1.types import operations from google.cloud.automl_v1.types import service from google.cloud.automl_v1.types import text from google.cloud.automl_v1.types import text_extraction from google.cloud.automl_v1.types import text_sentiment from google.cloud.automl_v1.types import translation from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert AutoMlClient._get_default_mtls_endpoint(None) is None assert AutoMlClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert ( AutoMlClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( AutoMlClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( AutoMlClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,]) def test_auto_ml_client_from_service_account_info(client_class): 
creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == "automl.googleapis.com:443" @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.AutoMlGrpcTransport, "grpc"), (transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_auto_ml_client_service_account_always_use_jwt(transport_class, transport_name): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,]) def test_auto_ml_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == "automl.googleapis.com:443" def test_auto_ml_client_get_transport_class(): transport = AutoMlClient.get_transport_class() available_transports = [ 
transports.AutoMlGrpcTransport, ] assert transport in available_transports transport = AutoMlClient.get_transport_class("grpc") assert transport == transports.AutoMlGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), ], ) @mock.patch.object( AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient) ) @mock.patch.object( AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient) ) def test_auto_ml_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. with mock.patch.object(AutoMlClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(AutoMlClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "true"), ( AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "true", ), (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "false"), ( AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ], ) @mock.patch.object( AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient) ) @mock.patch.object( AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient) ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_auto_ml_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient]) @mock.patch.object( AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient) ) @mock.patch.object( AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient) ) def test_auto_ml_client_get_mtls_endpoint_and_cert_source(client_class): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source == mock_client_cert_source # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): mock_client_cert_source = mock.Mock() mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): ( api_endpoint, cert_source, ) = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_auto_ml_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. 
    # Scopes supplied via ClientOptions must be forwarded verbatim to the
    # transport constructor (the transport itself is mocked out here).
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers),
        (
            AutoMlAsyncClient,
            transports.AutoMlGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_auto_ml_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in ClientOptions is passed through to the transport
    unchanged (no eager credential loading happens in the client)."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")

    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


def test_auto_ml_client_client_options_from_dict():
    """client_options may be a plain dict; api_endpoint must reach the
    transport as the host argument."""
    with mock.patch(
        "google.cloud.automl_v1.services.auto_ml.transports.AutoMlGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = AutoMlClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers", [ (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), ( AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_auto_ml_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # test that the credentials from file are saved and used as the credentials. with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: creds = ga_credentials.AnonymousCredentials() file_creds = ga_credentials.AnonymousCredentials() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) create_channel.assert_called_with( "automl.googleapis.com:443", credentials=file_creds, credentials_file=None, quota_project_id=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), scopes=None, default_host="automl.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize("request_type", [service.CreateDatasetRequest, dict,]) def test_create_dataset(request_type, transport: str = "grpc"): client = 
AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.create_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.CreateDatasetRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_create_dataset_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: client.create_dataset() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.CreateDatasetRequest() @pytest.mark.asyncio async def test_create_dataset_async( transport: str = "grpc_asyncio", request_type=service.CreateDatasetRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.create_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.CreateDatasetRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_create_dataset_async_from_dict(): await test_create_dataset_async(request_type=dict) def test_create_dataset_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.CreateDatasetRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.create_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_create_dataset_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.CreateDatasetRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.create_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_create_dataset_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_dataset( parent="parent_value", dataset=gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].dataset mock_val = gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ) assert arg == mock_val def test_create_dataset_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.create_dataset( service.CreateDatasetRequest(), parent="parent_value", dataset=gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ), ) @pytest.mark.asyncio async def test_create_dataset_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_dataset( parent="parent_value", dataset=gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].dataset mock_val = gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ) assert arg == mock_val @pytest.mark.asyncio async def test_create_dataset_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): await client.create_dataset( service.CreateDatasetRequest(), parent="parent_value", dataset=gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ), ) @pytest.mark.parametrize("request_type", [service.GetDatasetRequest, dict,]) def test_get_dataset(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = dataset.Dataset( name="name_value", display_name="display_name_value", description="description_value", example_count=1396, etag="etag_value", translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ), ) response = client.get_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.GetDatasetRequest() # Establish that the response is the type that we expect. assert isinstance(response, dataset.Dataset) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.example_count == 1396 assert response.etag == "etag_value" def test_get_dataset_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: client.get_dataset() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.GetDatasetRequest() @pytest.mark.asyncio async def test_get_dataset_async( transport: str = "grpc_asyncio", request_type=service.GetDatasetRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( dataset.Dataset( name="name_value", display_name="display_name_value", description="description_value", example_count=1396, etag="etag_value", ) ) response = await client.get_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.GetDatasetRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, dataset.Dataset) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.example_count == 1396 assert response.etag == "etag_value" @pytest.mark.asyncio async def test_get_dataset_async_from_dict(): await test_get_dataset_async(request_type=dict) def test_get_dataset_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.GetDatasetRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: call.return_value = dataset.Dataset() client.get_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_get_dataset_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.GetDatasetRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) await client.get_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_get_dataset_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = dataset.Dataset() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_dataset(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_dataset_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_dataset( service.GetDatasetRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_dataset_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = dataset.Dataset() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_dataset(name="name_value",) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_dataset_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_dataset( service.GetDatasetRequest(), name="name_value", ) @pytest.mark.parametrize("request_type", [service.ListDatasetsRequest, dict,]) def test_list_datasets(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = service.ListDatasetsResponse( next_page_token="next_page_token_value", ) response = client.list_datasets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.ListDatasetsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListDatasetsPager) assert response.next_page_token == "next_page_token_value" def test_list_datasets_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: client.list_datasets() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.ListDatasetsRequest() @pytest.mark.asyncio async def test_list_datasets_async( transport: str = "grpc_asyncio", request_type=service.ListDatasetsRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListDatasetsResponse(next_page_token="next_page_token_value",) ) response = await client.list_datasets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.ListDatasetsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListDatasetsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_datasets_async_from_dict(): await test_list_datasets_async(request_type=dict) def test_list_datasets_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ListDatasetsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: call.return_value = service.ListDatasetsResponse() client.list_datasets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_list_datasets_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ListDatasetsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListDatasetsResponse() ) await client.list_datasets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_list_datasets_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = service.ListDatasetsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_datasets(parent="parent_value",) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val def test_list_datasets_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_datasets( service.ListDatasetsRequest(), parent="parent_value", ) @pytest.mark.asyncio async def test_list_datasets_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = service.ListDatasetsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListDatasetsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_datasets(parent="parent_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val @pytest.mark.asyncio async def test_list_datasets_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_datasets( service.ListDatasetsRequest(), parent="parent_value", ) def test_list_datasets_pager(transport_name: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( service.ListDatasetsResponse( datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),], next_page_token="abc", ), service.ListDatasetsResponse(datasets=[], next_page_token="def",), service.ListDatasetsResponse( datasets=[dataset.Dataset(),], next_page_token="ghi", ), service.ListDatasetsResponse( datasets=[dataset.Dataset(), dataset.Dataset(),], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_datasets(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, dataset.Dataset) for i in results) def test_list_datasets_pages(transport_name: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( service.ListDatasetsResponse( datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),], next_page_token="abc", ), service.ListDatasetsResponse(datasets=[], next_page_token="def",), service.ListDatasetsResponse( datasets=[dataset.Dataset(),], next_page_token="ghi", ), service.ListDatasetsResponse( datasets=[dataset.Dataset(), dataset.Dataset(),], ), RuntimeError, ) pages = list(client.list_datasets(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_datasets_async_pager(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( service.ListDatasetsResponse( datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),], next_page_token="abc", ), service.ListDatasetsResponse(datasets=[], next_page_token="def",), service.ListDatasetsResponse( datasets=[dataset.Dataset(),], next_page_token="ghi", ), service.ListDatasetsResponse( datasets=[dataset.Dataset(), dataset.Dataset(),], ), RuntimeError, ) async_pager = await client.list_datasets(request={},) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, dataset.Dataset) for i in responses) @pytest.mark.asyncio async def test_list_datasets_async_pages(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( service.ListDatasetsResponse( datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),], next_page_token="abc", ), service.ListDatasetsResponse(datasets=[], next_page_token="def",), service.ListDatasetsResponse( datasets=[dataset.Dataset(),], next_page_token="ghi", ), service.ListDatasetsResponse( datasets=[dataset.Dataset(), dataset.Dataset(),], ), RuntimeError, ) pages = [] async for page_ in (await client.list_datasets(request={})).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize("request_type", [service.UpdateDatasetRequest, dict,]) def test_update_dataset(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gca_dataset.Dataset( name="name_value", display_name="display_name_value", description="description_value", example_count=1396, etag="etag_value", translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ), ) response = client.update_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.UpdateDatasetRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, gca_dataset.Dataset) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.example_count == 1396 assert response.etag == "etag_value" def test_update_dataset_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: client.update_dataset() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.UpdateDatasetRequest() @pytest.mark.asyncio async def test_update_dataset_async( transport: str = "grpc_asyncio", request_type=service.UpdateDatasetRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gca_dataset.Dataset( name="name_value", display_name="display_name_value", description="description_value", example_count=1396, etag="etag_value", ) ) response = await client.update_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.UpdateDatasetRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, gca_dataset.Dataset) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.example_count == 1396 assert response.etag == "etag_value" @pytest.mark.asyncio async def test_update_dataset_async_from_dict(): await test_update_dataset_async(request_type=dict) def test_update_dataset_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.UpdateDatasetRequest() request.dataset.name = "dataset.name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: call.return_value = gca_dataset.Dataset() client.update_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[ "metadata" ] @pytest.mark.asyncio async def test_update_dataset_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.UpdateDatasetRequest() request.dataset.name = "dataset.name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) await client.update_dataset(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[ "metadata" ] def test_update_dataset_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gca_dataset.Dataset() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_dataset( dataset=gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].dataset mock_val = gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ) assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val def test_update_dataset_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.update_dataset( service.UpdateDatasetRequest(), dataset=gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_dataset_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gca_dataset.Dataset() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_dataset( dataset=gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].dataset mock_val = gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ) assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio async def test_update_dataset_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): await client.update_dataset( service.UpdateDatasetRequest(), dataset=gca_dataset.Dataset( translation_dataset_metadata=translation.TranslationDatasetMetadata( source_language_code="source_language_code_value" ) ), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize("request_type", [service.DeleteDatasetRequest, dict,]) def test_delete_dataset(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.delete_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.DeleteDatasetRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_delete_dataset_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call: client.delete_dataset() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.DeleteDatasetRequest() @pytest.mark.asyncio async def test_delete_dataset_async( transport: str = "grpc_asyncio", request_type=service.DeleteDatasetRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.delete_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.DeleteDatasetRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_delete_dataset_async_from_dict(): await test_delete_dataset_async(request_type=dict) def test_delete_dataset_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.DeleteDatasetRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.delete_dataset(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_delete_dataset_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.DeleteDatasetRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.delete_dataset(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_delete_dataset_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_dataset(name="name_value",) # Establish that the underlying call was made with the expected # request object values. 
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


def test_delete_dataset_flattened_error():
    """delete_dataset must reject a request object combined with flattened fields."""
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_dataset(
            service.DeleteDatasetRequest(), name="name_value",
        )


@pytest.mark.asyncio
async def test_delete_dataset_flattened_async():
    """Async variant: a flattened `name` kwarg is copied into the request."""
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # NOTE(review): the sync return value above is immediately overwritten
        # with the awaitable fake below; only the FakeUnaryUnaryCall is used.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_dataset(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


@pytest.mark.asyncio
async def test_delete_dataset_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
with pytest.raises(ValueError): await client.delete_dataset( service.DeleteDatasetRequest(), name="name_value", ) @pytest.mark.parametrize("request_type", [service.ImportDataRequest, dict,]) def test_import_data(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.import_data), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.import_data(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.ImportDataRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_import_data_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.import_data), "__call__") as call: client.import_data() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.ImportDataRequest() @pytest.mark.asyncio async def test_import_data_async( transport: str = "grpc_asyncio", request_type=service.ImportDataRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.import_data), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.import_data(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.ImportDataRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_import_data_async_from_dict(): await test_import_data_async(request_type=dict) def test_import_data_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ImportDataRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.import_data), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.import_data(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_import_data_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ImportDataRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.import_data), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.import_data(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_import_data_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.import_data), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.import_data( name="name_value", input_config=io.InputConfig( gcs_source=io.GcsSource(input_uris=["input_uris_value"]) ), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val arg = args[0].input_config mock_val = io.InputConfig( gcs_source=io.GcsSource(input_uris=["input_uris_value"]) ) assert arg == mock_val def test_import_data_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.import_data( service.ImportDataRequest(), name="name_value", input_config=io.InputConfig( gcs_source=io.GcsSource(input_uris=["input_uris_value"]) ), ) @pytest.mark.asyncio async def test_import_data_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.import_data), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.import_data( name="name_value", input_config=io.InputConfig( gcs_source=io.GcsSource(input_uris=["input_uris_value"]) ), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val arg = args[0].input_config mock_val = io.InputConfig( gcs_source=io.GcsSource(input_uris=["input_uris_value"]) ) assert arg == mock_val @pytest.mark.asyncio async def test_import_data_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.import_data( service.ImportDataRequest(), name="name_value", input_config=io.InputConfig( gcs_source=io.GcsSource(input_uris=["input_uris_value"]) ), ) @pytest.mark.parametrize("request_type", [service.ExportDataRequest, dict,]) def test_export_data(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_data), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.export_data(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.ExportDataRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_export_data_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_data), "__call__") as call: client.export_data() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.ExportDataRequest() @pytest.mark.asyncio async def test_export_data_async( transport: str = "grpc_asyncio", request_type=service.ExportDataRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_data), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.export_data(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.ExportDataRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_export_data_async_from_dict(): await test_export_data_async(request_type=dict) def test_export_data_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ExportDataRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.export_data), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.export_data(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_export_data_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ExportDataRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_data), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.export_data(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_export_data_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_data), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.export_data( name="name_value", output_config=io.OutputConfig( gcs_destination=io.GcsDestination( output_uri_prefix="output_uri_prefix_value" ) ), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val arg = args[0].output_config mock_val = io.OutputConfig( gcs_destination=io.GcsDestination( output_uri_prefix="output_uri_prefix_value" ) ) assert arg == mock_val def test_export_data_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.export_data( service.ExportDataRequest(), name="name_value", output_config=io.OutputConfig( gcs_destination=io.GcsDestination( output_uri_prefix="output_uri_prefix_value" ) ), ) @pytest.mark.asyncio async def test_export_data_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_data), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.export_data( name="name_value", output_config=io.OutputConfig( gcs_destination=io.GcsDestination( output_uri_prefix="output_uri_prefix_value" ) ), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val arg = args[0].output_config mock_val = io.OutputConfig( gcs_destination=io.GcsDestination( output_uri_prefix="output_uri_prefix_value" ) ) assert arg == mock_val @pytest.mark.asyncio async def test_export_data_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.export_data( service.ExportDataRequest(), name="name_value", output_config=io.OutputConfig( gcs_destination=io.GcsDestination( output_uri_prefix="output_uri_prefix_value" ) ), ) @pytest.mark.parametrize("request_type", [service.GetAnnotationSpecRequest, dict,]) def test_get_annotation_spec(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_annotation_spec), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = annotation_spec.AnnotationSpec( name="name_value", display_name="display_name_value", example_count=1396, ) response = client.get_annotation_spec(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.GetAnnotationSpecRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, annotation_spec.AnnotationSpec) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.example_count == 1396 def test_get_annotation_spec_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_annotation_spec), "__call__" ) as call: client.get_annotation_spec() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.GetAnnotationSpecRequest() @pytest.mark.asyncio async def test_get_annotation_spec_async( transport: str = "grpc_asyncio", request_type=service.GetAnnotationSpecRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_annotation_spec), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( annotation_spec.AnnotationSpec( name="name_value", display_name="display_name_value", example_count=1396, ) ) response = await client.get_annotation_spec(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.GetAnnotationSpecRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, annotation_spec.AnnotationSpec) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.example_count == 1396 @pytest.mark.asyncio async def test_get_annotation_spec_async_from_dict(): await test_get_annotation_spec_async(request_type=dict) def test_get_annotation_spec_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.GetAnnotationSpecRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_annotation_spec), "__call__" ) as call: call.return_value = annotation_spec.AnnotationSpec() client.get_annotation_spec(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_get_annotation_spec_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.GetAnnotationSpecRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_annotation_spec), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( annotation_spec.AnnotationSpec() ) await client.get_annotation_spec(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_get_annotation_spec_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_annotation_spec), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = annotation_spec.AnnotationSpec() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_annotation_spec(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_annotation_spec_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_annotation_spec( service.GetAnnotationSpecRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_annotation_spec_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_annotation_spec), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = annotation_spec.AnnotationSpec() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( annotation_spec.AnnotationSpec() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_annotation_spec(name="name_value",) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_annotation_spec_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_annotation_spec( service.GetAnnotationSpecRequest(), name="name_value", ) @pytest.mark.parametrize("request_type", [service.CreateModelRequest, dict,]) def test_create_model(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.create_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.CreateModelRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_create_model_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.create_model), "__call__") as call: client.create_model() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.CreateModelRequest() @pytest.mark.asyncio async def test_create_model_async( transport: str = "grpc_asyncio", request_type=service.CreateModelRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.create_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.CreateModelRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_create_model_async_from_dict(): await test_create_model_async(request_type=dict) def test_create_model_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.CreateModelRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_model), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.create_model(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_create_model_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.CreateModelRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_model), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.create_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_create_model_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_model( parent="parent_value", model=gca_model.Model( translation_model_metadata=translation.TranslationModelMetadata( base_model="base_model_value" ) ), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].model mock_val = gca_model.Model( translation_model_metadata=translation.TranslationModelMetadata( base_model="base_model_value" ) ) assert arg == mock_val def test_create_model_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.create_model( service.CreateModelRequest(), parent="parent_value", model=gca_model.Model( translation_model_metadata=translation.TranslationModelMetadata( base_model="base_model_value" ) ), ) @pytest.mark.asyncio async def test_create_model_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_model( parent="parent_value", model=gca_model.Model( translation_model_metadata=translation.TranslationModelMetadata( base_model="base_model_value" ) ), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].model mock_val = gca_model.Model( translation_model_metadata=translation.TranslationModelMetadata( base_model="base_model_value" ) ) assert arg == mock_val @pytest.mark.asyncio async def test_create_model_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_model( service.CreateModelRequest(), parent="parent_value", model=gca_model.Model( translation_model_metadata=translation.TranslationModelMetadata( base_model="base_model_value" ) ), ) @pytest.mark.parametrize("request_type", [service.GetModelRequest, dict,]) def test_get_model(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = model.Model( name="name_value", display_name="display_name_value", dataset_id="dataset_id_value", deployment_state=model.Model.DeploymentState.DEPLOYED, etag="etag_value", translation_model_metadata=translation.TranslationModelMetadata( base_model="base_model_value" ), ) response = client.get_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.GetModelRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, model.Model) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.dataset_id == "dataset_id_value" assert response.deployment_state == model.Model.DeploymentState.DEPLOYED assert response.etag == "etag_value" def test_get_model_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_model), "__call__") as call: client.get_model() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.GetModelRequest() @pytest.mark.asyncio async def test_get_model_async( transport: str = "grpc_asyncio", request_type=service.GetModelRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( model.Model( name="name_value", display_name="display_name_value", dataset_id="dataset_id_value", deployment_state=model.Model.DeploymentState.DEPLOYED, etag="etag_value", ) ) response = await client.get_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.GetModelRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, model.Model) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.dataset_id == "dataset_id_value" assert response.deployment_state == model.Model.DeploymentState.DEPLOYED assert response.etag == "etag_value" @pytest.mark.asyncio async def test_get_model_async_from_dict(): await test_get_model_async(request_type=dict) def test_get_model_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.GetModelRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_model), "__call__") as call: call.return_value = model.Model() client.get_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_get_model_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.GetModelRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_model), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) await client.get_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_get_model_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = model.Model() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_model(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_model_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_model( service.GetModelRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_model_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = model.Model() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_model(name="name_value",) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_model_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_model( service.GetModelRequest(), name="name_value", ) @pytest.mark.parametrize("request_type", [service.ListModelsRequest, dict,]) def test_list_models(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_models), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = service.ListModelsResponse( next_page_token="next_page_token_value", ) response = client.list_models(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.ListModelsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListModelsPager) assert response.next_page_token == "next_page_token_value" def test_list_models_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_models), "__call__") as call: client.list_models() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.ListModelsRequest() @pytest.mark.asyncio async def test_list_models_async( transport: str = "grpc_asyncio", request_type=service.ListModelsRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_models), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListModelsResponse(next_page_token="next_page_token_value",) ) response = await client.list_models(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.ListModelsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListModelsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_models_async_from_dict(): await test_list_models_async(request_type=dict) def test_list_models_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ListModelsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_models), "__call__") as call: call.return_value = service.ListModelsResponse() client.list_models(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_list_models_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ListModelsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_models), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListModelsResponse() ) await client.list_models(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_list_models_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_models), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = service.ListModelsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_models(parent="parent_value",) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val def test_list_models_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_models( service.ListModelsRequest(), parent="parent_value", ) @pytest.mark.asyncio async def test_list_models_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_models), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = service.ListModelsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListModelsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_models(parent="parent_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val @pytest.mark.asyncio async def test_list_models_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_models( service.ListModelsRequest(), parent="parent_value", ) def test_list_models_pager(transport_name: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_models), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( service.ListModelsResponse( model=[model.Model(), model.Model(), model.Model(),], next_page_token="abc", ), service.ListModelsResponse(model=[], next_page_token="def",), service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",), service.ListModelsResponse(model=[model.Model(), model.Model(),],), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_models(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, model.Model) for i in results) def test_list_models_pages(transport_name: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_models), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( service.ListModelsResponse( model=[model.Model(), model.Model(), model.Model(),], next_page_token="abc", ), service.ListModelsResponse(model=[], next_page_token="def",), service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",), service.ListModelsResponse(model=[model.Model(), model.Model(),],), RuntimeError, ) pages = list(client.list_models(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_models_async_pager(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( service.ListModelsResponse( model=[model.Model(), model.Model(), model.Model(),], next_page_token="abc", ), service.ListModelsResponse(model=[], next_page_token="def",), service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",), service.ListModelsResponse(model=[model.Model(), model.Model(),],), RuntimeError, ) async_pager = await client.list_models(request={},) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, model.Model) for i in responses) @pytest.mark.asyncio async def test_list_models_async_pages(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( service.ListModelsResponse( model=[model.Model(), model.Model(), model.Model(),], next_page_token="abc", ), service.ListModelsResponse(model=[], next_page_token="def",), service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",), service.ListModelsResponse(model=[model.Model(), model.Model(),],), RuntimeError, ) pages = [] async for page_ in (await client.list_models(request={})).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize("request_type", [service.DeleteModelRequest, dict,]) def test_delete_model(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
    with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.DeleteModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_delete_model_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
        client.delete_model()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must still send a default-constructed request proto.
        assert args[0] == service.DeleteModelRequest()


@pytest.mark.asyncio
async def test_delete_model_async(
    transport: str = "grpc_asyncio", request_type=service.DeleteModelRequest
):
    client = AutoMlAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.DeleteModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_delete_model_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_delete_model_async(request_type=dict)


# Verify the request's resource name is forwarded to the transport as the
# x-goog-request-params routing header.
def test_delete_model_field_headers():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.DeleteModelRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_delete_model_field_headers_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.DeleteModelRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


# Verify the flattened keyword argument is copied into the request proto.
def test_delete_model_flattened():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_model(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


def test_delete_model_flattened_error():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_model(
            service.DeleteModelRequest(), name="name_value",
        )


@pytest.mark.asyncio
async def test_delete_model_flattened_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")

        # NOTE: the return_value above is immediately replaced by the awaitable
        # wrapper below (generated-code artifact); only this one takes effect.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_model(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


@pytest.mark.asyncio
async def test_delete_model_flattened_error_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_model(
            service.DeleteModelRequest(), name="name_value",
        )


# UpdateModel is a unary call returning the updated Model proto directly
# (not a long-running operation).
@pytest.mark.parametrize("request_type", [service.UpdateModelRequest, dict,])
def test_update_model(request_type, transport: str = "grpc"):
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_model.Model(
            name="name_value",
            display_name="display_name_value",
            dataset_id="dataset_id_value",
            deployment_state=gca_model.Model.DeploymentState.DEPLOYED,
            etag="etag_value",
            translation_model_metadata=translation.TranslationModelMetadata(
                base_model="base_model_value"
            ),
        )
        response = client.update_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.UpdateModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_model.Model)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.dataset_id == "dataset_id_value"
    assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED
    assert response.etag == "etag_value"


def test_update_model_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_model), "__call__") as call:
        client.update_model()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must still send a default-constructed request proto.
        assert args[0] == service.UpdateModelRequest()


@pytest.mark.asyncio
async def test_update_model_async(
    transport: str = "grpc_asyncio", request_type=service.UpdateModelRequest
):
    client = AutoMlAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_model.Model(
                name="name_value",
                display_name="display_name_value",
                dataset_id="dataset_id_value",
                deployment_state=gca_model.Model.DeploymentState.DEPLOYED,
                etag="etag_value",
            )
        )
        response = await client.update_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.UpdateModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_model.Model)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.dataset_id == "dataset_id_value"
    assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED
    assert response.etag == "etag_value"


@pytest.mark.asyncio
async def test_update_model_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_update_model_async(request_type=dict)


# Verify the nested resource name (model.name) is forwarded as the
# x-goog-request-params routing header.
def test_update_model_field_headers():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.UpdateModelRequest()

    request.model.name = "model.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_model), "__call__") as call:
        call.return_value = gca_model.Model()
        client.update_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "model.name=model.name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_update_model_field_headers_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.UpdateModelRequest()

    request.model.name = "model.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_model), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
        await client.update_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "model.name=model.name/value",) in kw["metadata"]


# Verify both flattened keyword arguments (model, update_mask) are copied
# into the request proto.
def test_update_model_flattened():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_model.Model()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_model(
            model=gca_model.Model(
                translation_model_metadata=translation.TranslationModelMetadata(
                    base_model="base_model_value"
                )
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].model
        mock_val = gca_model.Model(
            translation_model_metadata=translation.TranslationModelMetadata(
                base_model="base_model_value"
            )
        )
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val


def test_update_model_flattened_error():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_model(
            service.UpdateModelRequest(),
            model=gca_model.Model(
                translation_model_metadata=translation.TranslationModelMetadata(
                    base_model="base_model_value"
                )
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )


@pytest.mark.asyncio
async def test_update_model_flattened_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_model.Model()

        # NOTE: the return_value above is immediately replaced by the awaitable
        # wrapper below (generated-code artifact); only this one takes effect.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_model(
            model=gca_model.Model(
                translation_model_metadata=translation.TranslationModelMetadata(
                    base_model="base_model_value"
                )
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].model
        mock_val = gca_model.Model(
            translation_model_metadata=translation.TranslationModelMetadata(
                base_model="base_model_value"
            )
        )
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val


@pytest.mark.asyncio
async def test_update_model_flattened_error_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_model(
            service.UpdateModelRequest(),
            model=gca_model.Model(
                translation_model_metadata=translation.TranslationModelMetadata(
                    base_model="base_model_value"
                )
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )


# DeployModel returns a long-running operation; the test only checks the
# request proto sent to the stub and the Future-typed response wrapper.
@pytest.mark.parametrize("request_type", [service.DeployModelRequest, dict,])
def test_deploy_model(request_type, transport: str = "grpc"):
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.deploy_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.DeployModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_deploy_model_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
        client.deploy_model()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must still send a default-constructed request proto.
        assert args[0] == service.DeployModelRequest()


@pytest.mark.asyncio
async def test_deploy_model_async(
    transport: str = "grpc_asyncio", request_type=service.DeployModelRequest
):
    client = AutoMlAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.deploy_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.DeployModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_deploy_model_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_deploy_model_async(request_type=dict)


# Verify the request's resource name is forwarded to the transport as the
# x-goog-request-params routing header.
def test_deploy_model_field_headers():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.DeployModelRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.deploy_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_deploy_model_field_headers_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.DeployModelRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.deploy_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


# Verify the flattened keyword argument is copied into the request proto.
def test_deploy_model_flattened():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.deploy_model(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


def test_deploy_model_flattened_error():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.deploy_model(
            service.DeployModelRequest(), name="name_value",
        )


@pytest.mark.asyncio
async def test_deploy_model_flattened_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")

        # NOTE: the return_value above is immediately replaced by the awaitable
        # wrapper below (generated-code artifact); only this one takes effect.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.deploy_model(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


@pytest.mark.asyncio
async def test_deploy_model_flattened_error_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.deploy_model(
            service.DeployModelRequest(), name="name_value",
        )


# UndeployModel returns a long-running operation; the test only checks the
# request proto sent to the stub and the Future-typed response wrapper.
@pytest.mark.parametrize("request_type", [service.UndeployModelRequest, dict,])
def test_undeploy_model(request_type, transport: str = "grpc"):
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.undeploy_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.UndeployModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_undeploy_model_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
        client.undeploy_model()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must still send a default-constructed request proto.
        assert args[0] == service.UndeployModelRequest()


@pytest.mark.asyncio
async def test_undeploy_model_async(
    transport: str = "grpc_asyncio", request_type=service.UndeployModelRequest
):
    client = AutoMlAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.undeploy_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.UndeployModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_undeploy_model_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_undeploy_model_async(request_type=dict)


# Verify the request's resource name is forwarded to the transport as the
# x-goog-request-params routing header.
def test_undeploy_model_field_headers():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.UndeployModelRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.undeploy_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_undeploy_model_field_headers_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.UndeployModelRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.undeploy_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


# Verify the flattened keyword argument is copied into the request proto.
def test_undeploy_model_flattened():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.undeploy_model(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


def test_undeploy_model_flattened_error():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.undeploy_model(
            service.UndeployModelRequest(), name="name_value",
        )


@pytest.mark.asyncio
async def test_undeploy_model_flattened_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")

        # NOTE: the return_value above is immediately replaced by the awaitable
        # wrapper below (generated-code artifact); only this one takes effect.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.undeploy_model(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


@pytest.mark.asyncio
async def test_undeploy_model_flattened_error_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.undeploy_model(
            service.UndeployModelRequest(), name="name_value",
        )


# ExportModel returns a long-running operation; the test only checks the
# request proto sent to the stub and the Future-typed response wrapper.
@pytest.mark.parametrize("request_type", [service.ExportModelRequest, dict,])
def test_export_model(request_type, transport: str = "grpc"):
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.export_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.ExportModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_export_model_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_model), "__call__") as call:
        client.export_model()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call must still send a default-constructed request proto.
        assert args[0] == service.ExportModelRequest()


@pytest.mark.asyncio
async def test_export_model_async(
    transport: str = "grpc_asyncio", request_type=service.ExportModelRequest
):
    client = AutoMlAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.export_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.ExportModelRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_export_model_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_export_model_async(request_type=dict)


# Verify the request's resource name is forwarded to the transport as the
# x-goog-request-params routing header.
def test_export_model_field_headers():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.ExportModelRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_model), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.export_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_export_model_field_headers_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.ExportModelRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_model), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.export_model(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


# Verify both flattened keyword arguments (name, output_config) are copied
# into the request proto.
def test_export_model_flattened():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.export_model(
            name="name_value",
            output_config=io.ModelExportOutputConfig(
                gcs_destination=io.GcsDestination(
                    output_uri_prefix="output_uri_prefix_value"
                )
            ),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].output_config
        mock_val = io.ModelExportOutputConfig(
            gcs_destination=io.GcsDestination(
                output_uri_prefix="output_uri_prefix_value"
            )
        )
        assert arg == mock_val


def test_export_model_flattened_error():
    client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.export_model(
            service.ExportModelRequest(),
            name="name_value",
            output_config=io.ModelExportOutputConfig(
                gcs_destination=io.GcsDestination(
                    output_uri_prefix="output_uri_prefix_value"
                )
            ),
        )


@pytest.mark.asyncio
async def test_export_model_flattened_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_model), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")

        # NOTE: the return_value above is immediately replaced by the awaitable
        # wrapper below (generated-code artifact); only this one takes effect.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.export_model(
            name="name_value",
            output_config=io.ModelExportOutputConfig(
                gcs_destination=io.GcsDestination(
                    output_uri_prefix="output_uri_prefix_value"
                )
            ),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].output_config
        mock_val = io.ModelExportOutputConfig(
            gcs_destination=io.GcsDestination(
                output_uri_prefix="output_uri_prefix_value"
            )
        )
        assert arg == mock_val


@pytest.mark.asyncio
async def test_export_model_flattened_error_async():
    client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.export_model(
            service.ExportModelRequest(),
            name="name_value",
            output_config=io.ModelExportOutputConfig(
                gcs_destination=io.GcsDestination(
                    output_uri_prefix="output_uri_prefix_value"
                )
            ),
        )


@pytest.mark.parametrize("request_type", [service.GetModelEvaluationRequest, dict,])
def test_get_model_evaluation(request_type, transport: str = "grpc"):
    client = AutoMlClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_model_evaluation), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = model_evaluation.ModelEvaluation(
            name="name_value",
            annotation_spec_id="annotation_spec_id_value",
            display_name="display_name_value",
            evaluated_example_count=2446,
            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
                au_prc=0.634
            ),
        )
        response = client.get_model_evaluation(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.GetModelEvaluationRequest()

    # Establish that the response is the type that we expect.
assert isinstance(response, model_evaluation.ModelEvaluation) assert response.name == "name_value" assert response.annotation_spec_id == "annotation_spec_id_value" assert response.display_name == "display_name_value" assert response.evaluated_example_count == 2446 def test_get_model_evaluation_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_model_evaluation), "__call__" ) as call: client.get_model_evaluation() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.GetModelEvaluationRequest() @pytest.mark.asyncio async def test_get_model_evaluation_async( transport: str = "grpc_asyncio", request_type=service.GetModelEvaluationRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_model_evaluation), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( model_evaluation.ModelEvaluation( name="name_value", annotation_spec_id="annotation_spec_id_value", display_name="display_name_value", evaluated_example_count=2446, ) ) response = await client.get_model_evaluation(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.GetModelEvaluationRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, model_evaluation.ModelEvaluation) assert response.name == "name_value" assert response.annotation_spec_id == "annotation_spec_id_value" assert response.display_name == "display_name_value" assert response.evaluated_example_count == 2446 @pytest.mark.asyncio async def test_get_model_evaluation_async_from_dict(): await test_get_model_evaluation_async(request_type=dict) def test_get_model_evaluation_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.GetModelEvaluationRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_model_evaluation), "__call__" ) as call: call.return_value = model_evaluation.ModelEvaluation() client.get_model_evaluation(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_get_model_evaluation_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.GetModelEvaluationRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_model_evaluation), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( model_evaluation.ModelEvaluation() ) await client.get_model_evaluation(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_get_model_evaluation_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_model_evaluation), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = model_evaluation.ModelEvaluation() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_model_evaluation(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_model_evaluation_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_model_evaluation( service.GetModelEvaluationRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_model_evaluation_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_model_evaluation), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = model_evaluation.ModelEvaluation() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( model_evaluation.ModelEvaluation() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
response = await client.get_model_evaluation(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_model_evaluation_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_model_evaluation( service.GetModelEvaluationRequest(), name="name_value", ) @pytest.mark.parametrize("request_type", [service.ListModelEvaluationsRequest, dict,]) def test_list_model_evaluations(request_type, transport: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = service.ListModelEvaluationsResponse( next_page_token="next_page_token_value", ) response = client.list_model_evaluations(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == service.ListModelEvaluationsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListModelEvaluationsPager) assert response.next_page_token == "next_page_token_value" def test_list_model_evaluations_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: client.list_model_evaluations() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == service.ListModelEvaluationsRequest() @pytest.mark.asyncio async def test_list_model_evaluations_async( transport: str = "grpc_asyncio", request_type=service.ListModelEvaluationsRequest ): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListModelEvaluationsResponse( next_page_token="next_page_token_value", ) ) response = await client.list_model_evaluations(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == service.ListModelEvaluationsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_model_evaluations_async_from_dict(): await test_list_model_evaluations_async(request_type=dict) def test_list_model_evaluations_field_headers(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = service.ListModelEvaluationsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: call.return_value = service.ListModelEvaluationsResponse() client.list_model_evaluations(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_list_model_evaluations_field_headers_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = service.ListModelEvaluationsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListModelEvaluationsResponse() ) await client.list_model_evaluations(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_list_model_evaluations_flattened(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: # Designate an appropriate return value for the call. 
call.return_value = service.ListModelEvaluationsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_model_evaluations( parent="parent_value", filter="filter_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].filter mock_val = "filter_value" assert arg == mock_val def test_list_model_evaluations_flattened_error(): client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_model_evaluations( service.ListModelEvaluationsRequest(), parent="parent_value", filter="filter_value", ) @pytest.mark.asyncio async def test_list_model_evaluations_flattened_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = service.ListModelEvaluationsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( service.ListModelEvaluationsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_model_evaluations( parent="parent_value", filter="filter_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].filter mock_val = "filter_value" assert arg == mock_val @pytest.mark.asyncio async def test_list_model_evaluations_flattened_error_async(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_model_evaluations( service.ListModelEvaluationsRequest(), parent="parent_value", filter="filter_value", ) def test_list_model_evaluations_pager(transport_name: str = "grpc"): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: # Set the response to a series of pages. call.side_effect = ( service.ListModelEvaluationsResponse( model_evaluation=[ model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), ], next_page_token="abc", ), service.ListModelEvaluationsResponse( model_evaluation=[], next_page_token="def", ), service.ListModelEvaluationsResponse( model_evaluation=[model_evaluation.ModelEvaluation(),], next_page_token="ghi", ), service.ListModelEvaluationsResponse( model_evaluation=[ model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_evaluations(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, model_evaluation.ModelEvaluation) for i in results) def test_list_model_evaluations_pages(transport_name: str = "grpc"): client = AutoMlClient( 
credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__" ) as call: # Set the response to a series of pages. call.side_effect = ( service.ListModelEvaluationsResponse( model_evaluation=[ model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), ], next_page_token="abc", ), service.ListModelEvaluationsResponse( model_evaluation=[], next_page_token="def", ), service.ListModelEvaluationsResponse( model_evaluation=[model_evaluation.ModelEvaluation(),], next_page_token="ghi", ), service.ListModelEvaluationsResponse( model_evaluation=[ model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), ], ), RuntimeError, ) pages = list(client.list_model_evaluations(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_model_evaluations_async_pager(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__", new_callable=mock.AsyncMock, ) as call: # Set the response to a series of pages. 
call.side_effect = ( service.ListModelEvaluationsResponse( model_evaluation=[ model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), ], next_page_token="abc", ), service.ListModelEvaluationsResponse( model_evaluation=[], next_page_token="def", ), service.ListModelEvaluationsResponse( model_evaluation=[model_evaluation.ModelEvaluation(),], next_page_token="ghi", ), service.ListModelEvaluationsResponse( model_evaluation=[ model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), ], ), RuntimeError, ) async_pager = await client.list_model_evaluations(request={},) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, model_evaluation.ModelEvaluation) for i in responses) @pytest.mark.asyncio async def test_list_model_evaluations_async_pages(): client = AutoMlAsyncClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_model_evaluations), "__call__", new_callable=mock.AsyncMock, ) as call: # Set the response to a series of pages. 
call.side_effect = ( service.ListModelEvaluationsResponse( model_evaluation=[ model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), ], next_page_token="abc", ), service.ListModelEvaluationsResponse( model_evaluation=[], next_page_token="def", ), service.ListModelEvaluationsResponse( model_evaluation=[model_evaluation.ModelEvaluation(),], next_page_token="ghi", ), service.ListModelEvaluationsResponse( model_evaluation=[ model_evaluation.ModelEvaluation(), model_evaluation.ModelEvaluation(), ], ), RuntimeError, ) pages = [] async for page_ in (await client.list_model_evaluations(request={})).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.AutoMlGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.AutoMlGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = AutoMlClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide an api_key and a transport instance. transport = transports.AutoMlGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = AutoMlClient(client_options=options, transport=transport,) # It is an error to provide an api_key and a credential. 
options = mock.Mock() options.api_key = "api_key" with pytest.raises(ValueError): client = AutoMlClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) # It is an error to provide scopes and a transport instance. transport = transports.AutoMlGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = AutoMlClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.AutoMlGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = AutoMlClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.AutoMlGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.AutoMlGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport,], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials(),) assert isinstance(client.transport, transports.AutoMlGrpcTransport,) def test_auto_ml_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.AutoMlTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_auto_ml_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.AutoMlTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. methods = ( "create_dataset", "get_dataset", "list_datasets", "update_dataset", "delete_dataset", "import_data", "export_data", "get_annotation_spec", "create_model", "get_model", "list_models", "delete_model", "update_model", "deploy_model", "undeploy_model", "export_model", "get_model_evaluation", "list_model_evaluations", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): transport.operations_client def test_auto_ml_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.AutoMlTransport( credentials_file="credentials.json", 
quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) def test_auto_ml_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.AutoMlTransport() adc.assert_called_once() def test_auto_ml_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) AutoMlClient() adc.assert_called_once_with( scopes=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport,], ) def test_auto_ml_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. 
with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.AutoMlGrpcTransport, grpc_helpers), (transports.AutoMlGrpcAsyncIOTransport, grpc_helpers_async), ], ) def test_auto_ml_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "automl.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=("https://www.googleapis.com/auth/cloud-platform",), scopes=["1", "2"], default_host="automl.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], ) def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) def test_auto_ml_host_no_port(): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="automl.googleapis.com" ), ) assert client.transport._host == "automl.googleapis.com:443" def test_auto_ml_host_with_port(): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="automl.googleapis.com:8000" ), ) assert client.transport._host == "automl.googleapis.com:8000" def test_auto_ml_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. 
transport = transports.AutoMlGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_auto_ml_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.AutoMlGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], ) def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ 
("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], ) def test_auto_ml_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_auto_ml_grpc_lro_client(): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance(transport.operations_client, operations_v1.OperationsClient,) # Ensure that subsequent calls to the property send the exact same object. 
assert transport.operations_client is transport.operations_client def test_auto_ml_grpc_lro_async_client(): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client def test_annotation_spec_path(): project = "squid" location = "clam" dataset = "whelk" annotation_spec = "octopus" expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format( project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) actual = AutoMlClient.annotation_spec_path( project, location, dataset, annotation_spec ) assert expected == actual def test_parse_annotation_spec_path(): expected = { "project": "oyster", "location": "nudibranch", "dataset": "cuttlefish", "annotation_spec": "mussel", } path = AutoMlClient.annotation_spec_path(**expected) # Check that the path construction is reversible. actual = AutoMlClient.parse_annotation_spec_path(path) assert expected == actual def test_dataset_path(): project = "winkle" location = "nautilus" dataset = "scallop" expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) actual = AutoMlClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { "project": "abalone", "location": "squid", "dataset": "clam", } path = AutoMlClient.dataset_path(**expected) # Check that the path construction is reversible. 
actual = AutoMlClient.parse_dataset_path(path) assert expected == actual def test_model_path(): project = "whelk" location = "octopus" model = "oyster" expected = "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) actual = AutoMlClient.model_path(project, location, model) assert expected == actual def test_parse_model_path(): expected = { "project": "nudibranch", "location": "cuttlefish", "model": "mussel", } path = AutoMlClient.model_path(**expected) # Check that the path construction is reversible. actual = AutoMlClient.parse_model_path(path) assert expected == actual def test_model_evaluation_path(): project = "winkle" location = "nautilus" model = "scallop" model_evaluation = "abalone" expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format( project=project, location=location, model=model, model_evaluation=model_evaluation, ) actual = AutoMlClient.model_evaluation_path( project, location, model, model_evaluation ) assert expected == actual def test_parse_model_evaluation_path(): expected = { "project": "squid", "location": "clam", "model": "whelk", "model_evaluation": "octopus", } path = AutoMlClient.model_evaluation_path(**expected) # Check that the path construction is reversible. actual = AutoMlClient.parse_model_evaluation_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "oyster" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = AutoMlClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "nudibranch", } path = AutoMlClient.common_billing_account_path(**expected) # Check that the path construction is reversible. 
actual = AutoMlClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "cuttlefish" expected = "folders/{folder}".format(folder=folder,) actual = AutoMlClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "mussel", } path = AutoMlClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = AutoMlClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "winkle" expected = "organizations/{organization}".format(organization=organization,) actual = AutoMlClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nautilus", } path = AutoMlClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = AutoMlClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "scallop" expected = "projects/{project}".format(project=project,) actual = AutoMlClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "abalone", } path = AutoMlClient.common_project_path(**expected) # Check that the path construction is reversible. actual = AutoMlClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "squid" location = "clam" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = AutoMlClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "whelk", "location": "octopus", } path = AutoMlClient.common_location_path(**expected) # Check that the path construction is reversible. 
actual = AutoMlClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.AutoMlTransport, "_prep_wrapped_messages" ) as prep: client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.AutoMlTransport, "_prep_wrapped_messages" ) as prep: transport_class = AutoMlClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = AutoMlAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "grpc", ] for transport in transports: client = AutoMlClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. 
with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called() @pytest.mark.parametrize( "client_class,transport_class", [ (AutoMlClient, transports.AutoMlGrpcTransport), (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport), ], ) def test_api_key_credentials(client_class, transport_class): with mock.patch.object( google.auth._default, "get_api_key_credentials", create=True ) as get_api_key_credentials: mock_cred = mock.Mock() get_api_key_credentials.return_value = mock_cred options = client_options.ClientOptions() options.api_key = "api_key" with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, )
googleapis/python-automl
tests/unit/gapic/automl_v1/test_auto_ml.py
Python
apache-2.0
220,732
[ "Octopus" ]
a79b59ea0c9a8de6061eb5eef5c1244084f1781366ae8c5c6e09094e67905ba7
# Copyright (C) 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import proto from google.protobuf.json_format import MessageToJson, Parse, ParseError def test_message_to_json(): class Squid(proto.Message): mass_kg = proto.Field(proto.INT32, number=1) s = Squid(mass_kg=100) json = Squid.to_json(s) json = json.replace(" ", "").replace("\n", "") assert json == '{"massKg":100}' def test_message_from_json(): class Squid(proto.Message): mass_kg = proto.Field(proto.INT32, number=1) json = """{ "massKg": 100 } """ s = Squid.from_json(json) assert s == Squid(mass_kg=100) def test_message_json_round_trip(): class Squid(proto.Message): mass_kg = proto.Field(proto.INT32, number=1) s = Squid(mass_kg=100) json = Squid.to_json(s) s2 = Squid.from_json(json) assert s == s2 def test_json_stringy_enums(): class Squid(proto.Message): zone = proto.Field(proto.ENUM, number=1, enum="Zone") class Zone(proto.Enum): EPIPELAGIC = 0 MESOPELAGIC = 1 BATHYPELAGIC = 2 ABYSSOPELAGIC = 3 s1 = Squid(zone=Zone.MESOPELAGIC) json = ( Squid.to_json(s1, use_integers_for_enums=False) .replace(" ", "") .replace("\n", "") ) assert json == '{"zone":"MESOPELAGIC"}' s2 = Squid.from_json(json) assert s2.zone == s1.zone def test_json_default_enums(): class Squid(proto.Message): zone = proto.Field(proto.ENUM, number=1, enum="Zone") class Zone(proto.Enum): EPIPELAGIC = 0 MESOPELAGIC = 1 BATHYPELAGIC = 2 ABYSSOPELAGIC = 3 s = Squid() assert s.zone == Zone.EPIPELAGIC json1 = Squid.to_json(s).replace(" 
", "").replace("\n", "") assert json1 == '{"zone":0}' json2 = ( Squid.to_json(s, use_integers_for_enums=False) .replace(" ", "") .replace("\n", "") ) assert json2 == '{"zone":"EPIPELAGIC"}' def test_json_default_values(): class Squid(proto.Message): mass_kg = proto.Field(proto.INT32, number=1) name = proto.Field(proto.STRING, number=2) s = Squid(name="Steve") json1 = ( Squid.to_json(s, including_default_value_fields=False) .replace(" ", "") .replace("\n", "") ) assert json1 == '{"name":"Steve"}' json2 = Squid.to_json(s).replace(" ", "").replace("\n", "") assert ( json2 == '{"name":"Steve","massKg":0}' or json2 == '{"massKg":0,"name":"Steve"}' ) s1 = Squid.from_json(json1) s2 = Squid.from_json(json2) assert s == s1 == s2 def test_json_unknown_field(): # Note that 'lengthCm' is unknown in the local definition. # This could happen if the client is using an older proto definition # than the server. json_str = '{\n "massKg": 20,\n "lengthCm": 100\n}' class Octopus(proto.Message): mass_kg = proto.Field(proto.INT32, number=1) o = Octopus.from_json(json_str, ignore_unknown_fields=True) assert not hasattr(o, "length_cm") assert not hasattr(o, "lengthCm") # Don't permit unknown fields by default with pytest.raises(ParseError): o = Octopus.from_json(json_str) def test_json_snake_case(): class Squid(proto.Message): mass_kg = proto.Field(proto.INT32, number=1) json_str = '{\n "mass_kg": 20\n}' s = Squid.from_json(json_str) assert s.mass_kg == 20 assert Squid.to_json(s, preserving_proto_field_name=True) == json_str def test_json_name(): class Squid(proto.Message): massKg = proto.Field(proto.INT32, number=1, json_name="mass_in_kilograms") s = Squid(massKg=20) j = Squid.to_json(s) assert "mass_in_kilograms" in j s_two = Squid.from_json(j) assert s == s_two
googleapis/proto-plus-python
tests/test_json.py
Python
apache-2.0
4,319
[ "Octopus" ]
0d86e82889329de1dc40d7883e7f8838460e2cc6fc89b7159ce6b157458b98b2
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Created on Nov 10, 2012 @author: shyue """ from pymatgen.util.testing import PymatgenTest __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __status__ = "Production" __date__ = "Nov 10, 2012" import unittest from pymatgen.core.periodic_table import Element, Specie from pymatgen.core.composition import Composition, CompositionError, \ ChemicalPotential import random class CompositionTest(PymatgenTest): def setUp(self): self.comp = list() self.comp.append(Composition("Li3Fe2(PO4)3")) self.comp.append(Composition("Li3Fe(PO4)O")) self.comp.append(Composition("LiMn2O4")) self.comp.append(Composition("Li4O4")) self.comp.append(Composition("Li3Fe2Mo3O12")) self.comp.append(Composition("Li3Fe2((PO4)3(CO3)5)2")) self.comp.append(Composition("Li1.5Si0.5")) self.comp.append(Composition("ZnOH")) self.indeterminate_comp = [] self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("Co1", True) ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("Co1", False) ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("co2o3") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("ncalu") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("calun") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula( "liCoo2n (pO4)2") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula( "(co)2 (PO)4") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("Fee3")) def test_immutable(self): try: self.comp[0]["Fe"] = 1 except Exception as ex: 
self.assertIsInstance(ex, TypeError) try: del self.comp[0]["Fe"] except Exception as ex: self.assertIsInstance(ex, TypeError) def test_in(self): self.assertIn("Fe", self.comp[0]) self.assertNotIn("Fe", self.comp[2]) self.assertIn(Element("Fe"), self.comp[0]) self.assertEqual(self.comp[0]["Fe"], 2) self.assertEqual(self.comp[0]["Mn"], 0) self.assertRaises(TypeError, self.comp[0].__getitem__, "Hello") self.assertRaises(TypeError, self.comp[0].__getitem__, "Vac") def test_hill_formula(self): c = Composition("CaCO3") self.assertEqual(c.hill_formula, "C Ca O3") c = Composition("C2H5OH") self.assertEqual(c.hill_formula, "C2 H6 O") def test_init_(self): self.assertRaises(CompositionError, Composition, {"H": -0.1}) f = {'Fe': 4, 'Li': 4, 'O': 16, 'P': 4} self.assertEqual("Li4 Fe4 P4 O16", Composition(f).formula) f = {None: 4, 'Li': 4, 'O': 16, 'P': 4} self.assertRaises(TypeError, Composition, f) f = {1: 2, 8: 1} self.assertEqual("H2 O1", Composition(f).formula) self.assertEqual("Na2 O1", Composition(Na=2, O=1).formula) c = Composition({'S': Composition.amount_tolerance / 2}) self.assertEqual(len(c.elements), 0) def test_average_electroneg(self): val = [2.7224999999999997, 2.4160000000000004, 2.5485714285714285, 2.21, 2.718, 3.08, 1.21, 2.43] for i, c in enumerate(self.comp): self.assertAlmostEqual(c.average_electroneg, val[i]) def test_total_electrons(self): test_cases = {'C': 6, 'SrTiO3': 84} for item in test_cases.keys(): c = Composition(item) self.assertAlmostEqual(c.total_electrons, test_cases[item]) def test_formula(self): correct_formulas = ['Li3 Fe2 P3 O12', 'Li3 Fe1 P1 O5', 'Li1 Mn2 O4', 'Li4 O4', 'Li3 Fe2 Mo3 O12', 'Li3 Fe2 P6 C10 O54', 'Li1.5 Si0.5', 'Zn1 H1 O1'] all_formulas = [c.formula for c in self.comp] self.assertEqual(all_formulas, correct_formulas) self.assertRaises(CompositionError, Composition, "(co2)(po4)2") self.assertEqual(Composition("K Na 2").reduced_formula, "KNa2") self.assertEqual(Composition("K3 Na 2").reduced_formula, "K3Na2") 
self.assertEqual(Composition("Na 3 Zr (PO 4) 3").reduced_formula, "Na3Zr(PO4)3") def test_iupac_formula(self): correct_formulas = ['Li3 Fe2 P3 O12', 'Li3 Fe1 P1 O5', 'Li1 Mn2 O4', 'Li4 O4', 'Li3 Mo3 Fe2 O12', 'Li3 Fe2 C10 P6 O54', 'Li1.5 Si0.5', 'Zn1 H1 O1'] all_formulas = [c.iupac_formula for c in self.comp] self.assertEqual(all_formulas, correct_formulas) def test_mixed_valence(self): comp = Composition({"Fe2+": 2, "Fe3+": 4, "Li+": 8}) self.assertEqual(comp.reduced_formula, "Li4Fe3") self.assertEqual(comp.alphabetical_formula, "Fe6 Li8") self.assertEqual(comp.formula, "Li8 Fe6") def test_indeterminate_formula(self): correct_formulas = [["Co1"], ["Co1", "C1 O1"], ["Co2 O3", "C1 O5"], ["N1 Ca1 Lu1", "U1 Al1 C1 N1"], ["N1 Ca1 Lu1", "U1 Al1 C1 N1"], ["Li1 Co1 P2 N1 O10", "Li1 Co1 Po8 N1 O2", "Li1 P2 C1 N1 O11", "Li1 Po8 C1 N1 O3"], ["Co2 P4 O4", "Co2 Po4", "P4 C2 O6", "Po4 C2 O2"], []] for i, c in enumerate(correct_formulas): self.assertEqual([Composition(comp) for comp in c], self.indeterminate_comp[i]) def test_alphabetical_formula(self): correct_formulas = ['Fe2 Li3 O12 P3', 'Fe1 Li3 O5 P1', 'Li1 Mn2 O4', 'Li4 O4', 'Fe2 Li3 Mo3 O12', 'C10 Fe2 Li3 O54 P6', 'Li1.5 Si0.5', 'H1 O1 Zn1'] all_formulas = [c.alphabetical_formula for c in self.comp] self.assertEqual(all_formulas, correct_formulas) def test_reduced_composition(self): correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4', 'Li2O2', 'Li3Fe2(MoO4)3', 'Li3Fe2P6(C5O27)2', 'Li1.5Si0.5', 'ZnHO'] for i in range(len(self.comp)): self.assertEqual(self.comp[i] .get_reduced_composition_and_factor()[0], Composition(correct_reduced_formulas[i])) def test_reduced_formula(self): correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4', 'Li2O2', 'Li3Fe2(MoO4)3', 'Li3Fe2P6(C5O27)2', 'Li1.5Si0.5', 'ZnHO'] all_formulas = [c.reduced_formula for c in self.comp] self.assertEqual(all_formulas, correct_reduced_formulas) # test iupac reduced formula (polyanions should still appear at the end) all_formulas = 
[c.get_reduced_formula_and_factor(iupac_ordering=True)[0] for c in self.comp] self.assertEqual(all_formulas, correct_reduced_formulas) self.assertEqual( Composition('H6CN').get_integer_formula_and_factor( iupac_ordering=True)[0], 'CNH6') # test rounding c = Composition({'Na': 2 - Composition.amount_tolerance / 2, 'Cl': 2}) self.assertEqual('NaCl', c.reduced_formula) def test_integer_formula(self): correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4', 'Li2O2', 'Li3Fe2(MoO4)3', 'Li3Fe2P6(C5O27)2', 'Li3Si', 'ZnHO'] all_formulas = [c.get_integer_formula_and_factor()[0] for c in self.comp] self.assertEqual(all_formulas, correct_reduced_formulas) self.assertEqual(Composition('Li0.5O0.25').get_integer_formula_and_factor(), ('Li2O', 0.25)) self.assertEqual(Composition('O0.25').get_integer_formula_and_factor(), ('O2', 0.125)) formula, factor = Composition( "Li0.16666667B1.0H1.0").get_integer_formula_and_factor() self.assertEqual(formula, 'Li(BH)6') self.assertAlmostEqual(factor, 1 / 6) # test iupac reduced formula (polyanions should still appear at the end) all_formulas = [c.get_integer_formula_and_factor(iupac_ordering=True)[0] for c in self.comp] self.assertEqual(all_formulas, correct_reduced_formulas) self.assertEqual( Composition('H6CN0.5').get_integer_formula_and_factor( iupac_ordering=True), ('C2NH12', 0.5)) def test_num_atoms(self): correct_num_atoms = [20, 10, 7, 8, 20, 75, 2, 3] all_natoms = [c.num_atoms for c in self.comp] self.assertEqual(all_natoms, correct_num_atoms) def test_weight(self): correct_weights = [417.427086, 187.63876199999999, 180.81469, 91.7616, 612.3258, 1302.430172, 24.454250000000002, 82.41634] all_weights = [c.weight for c in self.comp] self.assertArrayAlmostEqual(all_weights, correct_weights, 5) def test_get_atomic_fraction(self): correct_at_frac = {"Li": 0.15, "Fe": 0.1, "P": 0.15, "O": 0.6} for el in ["Li", "Fe", "P", "O"]: self.assertEqual(self.comp[0].get_atomic_fraction(el), correct_at_frac[el], "Wrong computed atomic 
fractions") self.assertEqual(self.comp[0].get_atomic_fraction("S"), 0, "Wrong computed atomic fractions") def test_anonymized_formula(self): expected_formulas = ['A2B3C3D12', 'ABC3D5', 'AB2C4', 'AB', 'A2B3C3D12', 'A2B3C6D10E54', 'A0.5B1.5', 'ABC'] for i in range(len(self.comp)): self.assertEqual(self.comp[i].anonymized_formula, expected_formulas[i]) def test_get_wt_fraction(self): correct_wt_frac = {"Li": 0.0498841610868, "Fe": 0.267567687258, "P": 0.222604831158, "O": 0.459943320496} for el in ["Li", "Fe", "P", "O"]: self.assertAlmostEqual(correct_wt_frac[el], self.comp[0].get_wt_fraction(el), 5, "Wrong computed weight fraction") self.assertEqual(self.comp[0].get_wt_fraction(Element("S")), 0, "Wrong computed weight fractions") def test_from_dict(self): sym_dict = {"Fe": 6, "O": 8} self.assertEqual(Composition.from_dict(sym_dict).reduced_formula, "Fe3O4", "Creation form sym_amount dictionary failed!") comp = Composition({"Fe2+": 2, "Fe3+": 4, "O2-": 8}) comp2 = Composition.from_dict(comp.as_dict()) self.assertEqual(comp, comp2) def test_as_dict(self): c = Composition.from_dict({'Fe': 4, 'O': 6}) d = c.as_dict() correct_dict = {'Fe': 4.0, 'O': 6.0} self.assertEqual(d['Fe'], correct_dict['Fe']) self.assertEqual(d['O'], correct_dict['O']) correct_dict = {'Fe': 2.0, 'O': 3.0} d = c.to_reduced_dict self.assertEqual(d['Fe'], correct_dict['Fe']) self.assertEqual(d['O'], correct_dict['O']) def test_pickle(self): for c in self.comp: self.serialize_with_pickle(c, test_eq=True) self.serialize_with_pickle(c.to_data_dict, test_eq=True) def test_to_data_dict(self): comp = Composition('Fe0.00009Ni0.99991') d = comp.to_data_dict self.assertAlmostEqual(d["reduced_cell_composition"]["Fe"], 9e-5) def test_add(self): self.assertEqual((self.comp[0] + self.comp[2]).formula, "Li4 Mn2 Fe2 P3 O16", "Incorrect composition after addition!") self.assertEqual((self.comp[3] + {"Fe": 4, "O": 4}).formula, "Li4 Fe4 O8", "Incorrect composition after addition!") def test_sub(self): 
self.assertEqual((self.comp[0] - Composition("Li2O")).formula, "Li1 Fe2 P3 O11", "Incorrect composition after addition!") self.assertEqual((self.comp[0] - {"Fe": 2, "O": 3}).formula, "Li3 P3 O9") self.assertRaises(CompositionError, Composition('O').__sub__, Composition('H')) # check that S is completely removed by subtraction c1 = Composition({'S': 1 + Composition.amount_tolerance / 2, 'O': 1}) c2 = Composition({'S': 1}) self.assertEqual(len((c1 - c2).elements), 1) def test_mul(self): self.assertEqual((self.comp[0] * 4).formula, "Li12 Fe8 P12 O48") self.assertEqual((3 * self.comp[1]).formula, "Li9 Fe3 P3 O15") def test_div(self): self.assertEqual((self.comp[0] / 4).formula, 'Li0.75 Fe0.5 P0.75 O3') def test_equals(self): random_z = random.randint(1, 92) fixed_el = Element.from_Z(random_z) other_z = random.randint(1, 92) while other_z == random_z: other_z = random.randint(1, 92) comp1 = Composition({fixed_el: 1, Element.from_Z(other_z): 0}) other_z = random.randint(1, 92) while other_z == random_z: other_z = random.randint(1, 92) comp2 = Composition({fixed_el: 1, Element.from_Z(other_z): 0}) self.assertEqual(comp1, comp2, "Composition equality test failed. 
" + "%s should be equal to %s" % (comp1.formula, comp2.formula)) self.assertEqual(comp1.__hash__(), comp2.__hash__(), "Hashcode equality test failed!") def test_comparisons(self): c1 = Composition({'S': 1}) c1_1 = Composition({'S': 1.00000000000001}) c2 = Composition({'S': 2}) c3 = Composition({'O': 1}) c4 = Composition({'O': 1, 'S': 1}) self.assertFalse(c1 > c2) self.assertFalse(c1_1 > c1) self.assertFalse(c1_1 < c1) self.assertTrue(c1 > c3) self.assertTrue(c3 < c1) self.assertTrue(c4 > c1) self.assertEqual(sorted([c1, c1_1, c2, c4, c3]), [c3, c1, c1_1, c4, c2]) def test_almost_equals(self): c1 = Composition({'Fe': 2.0, 'O': 3.0, 'Mn': 0}) c2 = Composition({'O': 3.2, 'Fe': 1.9, 'Zn': 0}) c3 = Composition({'Ag': 2.0, 'O': 3.0}) c4 = Composition({'Fe': 2.0, 'O': 3.0, 'Ag': 2.0}) self.assertTrue(c1.almost_equals(c2, rtol=0.1)) self.assertFalse(c1.almost_equals(c2, rtol=0.01)) self.assertFalse(c1.almost_equals(c3, rtol=0.1)) self.assertFalse(c1.almost_equals(c4, rtol=0.1)) def test_equality(self): self.assertTrue(self.comp[0].__eq__(self.comp[0])) self.assertFalse(self.comp[0].__eq__(self.comp[1])) self.assertFalse(self.comp[0].__ne__(self.comp[0])) self.assertTrue(self.comp[0].__ne__(self.comp[1])) def test_fractional_composition(self): for c in self.comp: self.assertAlmostEqual(c.fractional_composition.num_atoms, 1) def test_init_numerical_tolerance(self): self.assertEqual(Composition({'B': 1, 'C': -1e-12}), Composition('B')) def test_negative_compositions(self): self.assertEqual(Composition('Li-1(PO-1)4', allow_negative=True).formula, 'Li-1 P4 O-4') self.assertEqual(Composition('Li-1(PO-1)4', allow_negative=True).reduced_formula, 'Li-1(PO-1)4') self.assertEqual(Composition('Li-2Mg4', allow_negative=True).reduced_composition, Composition('Li-1Mg2', allow_negative=True)) self.assertEqual(Composition('Li-2.5Mg4', allow_negative=True).reduced_composition, Composition('Li-2.5Mg4', allow_negative=True)) # test math c1 = Composition('LiCl', allow_negative=True) c2 = 
Composition('Li') self.assertEqual(c1 - 2 * c2, Composition({'Li': -1, 'Cl': 1}, allow_negative=True)) self.assertEqual((c1 + c2).allow_negative, True) self.assertEqual(c1 / -1, Composition('Li-1Cl-1', allow_negative=True)) # test num_atoms c1 = Composition('Mg-1Li', allow_negative=True) self.assertEqual(c1.num_atoms, 2) self.assertEqual(c1.get_atomic_fraction('Mg'), 0.5) self.assertEqual(c1.get_atomic_fraction('Li'), 0.5) self.assertEqual(c1.fractional_composition, Composition('Mg-0.5Li0.5', allow_negative=True)) # test copy self.assertEqual(c1.copy(), c1) # test species c1 = Composition({'Mg': 1, 'Mg2+': -1}, allow_negative=True) self.assertEqual(c1.num_atoms, 2) self.assertEqual(c1.element_composition, Composition()) self.assertEqual(c1.average_electroneg, 1.31) def test_special_formulas(self): special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2", "HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2", "O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2", "H": "H2"} for k, v in special_formulas.items(): self.assertEqual(Composition(k).reduced_formula, v) def test_oxi_state_guesses(self): self.assertEqual(Composition("LiFeO2").oxi_state_guesses(), ({"Li": 1, "Fe": 3, "O": -2},)) self.assertEqual(Composition("Fe4O5").oxi_state_guesses(), ({"Fe": 2.5, "O": -2},)) self.assertEqual(Composition("V2O3").oxi_state_guesses(), ({"V": 3, "O": -2},)) # all_oxidation_states produces *many* possible responses self.assertEqual(len(Composition("MnO").oxi_state_guesses( all_oxi_states=True)), 4) # can't balance b/c missing V4+ self.assertEqual(Composition("VO2").oxi_state_guesses( oxi_states_override={"V": [2, 3, 5]}), []) # missing V4+, but can balance due to additional sites self.assertEqual(Composition("V2O4").oxi_state_guesses( oxi_states_override={"V": [2, 3, 5]}), ({"V": 4, "O": -2},)) # multiple solutions - Mn/Fe = 2+/4+ or 3+/3+ or 4+/2+ self.assertEqual(len(Composition("MnFeO3").oxi_state_guesses( oxi_states_override={"Mn": [2, 3, 4], "Fe": [2, 3, 4]})), 3) # multiple 
solutions prefers 3/3 over 2/4 or 4/2 self.assertEqual(Composition("MnFeO3").oxi_state_guesses( oxi_states_override={"Mn": [2, 3, 4], "Fe": [2, 3, 4]})[0], {"Mn": 3, "Fe": 3, "O": -2}) # target charge of 1 self.assertEqual(Composition("V2O6").oxi_state_guesses( oxi_states_override={"V": [2, 3, 4, 5]}, target_charge=-2), ({"V": 5, "O": -2},)) # max_sites for very large composition - should timeout if incorrect self.assertEqual(Composition("Li10000Fe10000P10000O40000"). oxi_state_guesses(max_sites=7)[0], {"Li": 1, "Fe": 2, "P": 5, "O": -2}) # max_sites for very large composition - should timeout if incorrect self.assertEqual(Composition("Li10000Fe10000P10000O40000"). oxi_state_guesses(max_sites=-1)[0], {"Li": 1, "Fe": 2, "P": 5, "O": -2}) # negative max_sites less than -1 - should throw error if cannot reduce # to under the abs(max_sites) number of sites. Will also timeout if # incorrect. self.assertEqual( Composition("Sb10000O10000F10000").oxi_state_guesses( max_sites=-3)[0], {"Sb": 3, "O": -2, "F": -1}) self.assertRaises(ValueError, Composition("LiOF").oxi_state_guesses, max_sites=-2) self.assertRaises(ValueError, Composition("V2O3"). 
oxi_state_guesses, max_sites=1) def test_oxi_state_decoration(self): # Basic test: Get compositions where each element is in a single charge state decorated = Composition("H2O").add_charges_from_oxi_state_guesses() self.assertIn(Specie("H", 1), decorated) self.assertEqual(2, decorated.get(Specie("H", 1))) # Test: More than one charge state per element decorated = Composition("Fe3O4").add_charges_from_oxi_state_guesses() self.assertEqual(1, decorated.get(Specie("Fe", 2))) self.assertEqual(2, decorated.get(Specie("Fe", 3))) self.assertEqual(4, decorated.get(Specie("O", -2))) # Test: No possible charge states # It should return an uncharged composition decorated = Composition("NiAl").add_charges_from_oxi_state_guesses() self.assertEqual(1, decorated.get(Specie("Ni", 0))) self.assertEqual(1, decorated.get(Specie("Al", 0))) def test_Metallofullerene(self): # Test: Parse Metallofullerene formula (e.g. Y3N@C80) formula = "Y3N@C80" sym_dict = {"Y": 3, "N": 1, "C": 80} cmp = Composition(formula) cmp2 = Composition.from_dict(sym_dict) self.assertEqual(cmp, cmp2) def test_contains_element_type(self): formula = "EuTiO3" cmp = Composition(formula) self.assertTrue(cmp.contains_element_type("lanthanoid")) self.assertFalse(cmp.contains_element_type("noble_gas")) self.assertTrue(cmp.contains_element_type("f-block")) self.assertFalse(cmp.contains_element_type("s-block")) def test_chemical_system(self): formula = "NaCl" cmp = Composition(formula) self.assertEqual(cmp.chemical_system, "Cl-Na") def test_is_valid(self): formula = "NaCl" cmp = Composition(formula) self.assertTrue(cmp.valid) formula = "NaClX" cmp = Composition(formula) self.assertFalse(cmp.valid) self.assertRaises(ValueError, Composition, "NaClX", strict=True) def test_remove_charges(self): cmp1 = Composition({'Al3+': 2.0, 'O2-': 3.0}) cmp2 = Composition({'Al': 2.0, 'O': 3.0}) self.assertNotEqual(str(cmp1), str(cmp2)) cmp1 = cmp1.remove_charges() self.assertEqual(str(cmp1), str(cmp2)) cmp1 = cmp1.remove_charges() 
self.assertEqual(str(cmp1), str(cmp2)) cmp1 = Composition({'Fe3+': 2.0, 'Fe2+': 3.0, 'O2-': 6.0}) cmp2 = Composition({'Fe': 5.0, 'O': 6.0}) self.assertNotEqual(str(cmp1), str(cmp2)) cmp1 = cmp1.remove_charges() self.assertEqual(str(cmp1), str(cmp2)) class ChemicalPotentialTest(unittest.TestCase): def test_init(self): d = {'Fe': 1, Element('Fe'): 1} self.assertRaises(ValueError, ChemicalPotential, d) for k in ChemicalPotential(Fe=1).keys(): self.assertIsInstance(k, Element) def test_math(self): fepot = ChemicalPotential({'Fe': 1}) opot = ChemicalPotential({'O': 2.1}) pots = ChemicalPotential({'Fe': 1, 'O': 2.1}) potsx2 = ChemicalPotential({'Fe': 2, 'O': 4.2}) feo2 = Composition('FeO2') # test get_energy() self.assertAlmostEqual(pots.get_energy(feo2), 5.2) self.assertAlmostEqual(fepot.get_energy(feo2, False), 1) self.assertRaises(ValueError, fepot.get_energy, feo2) # test multiplication self.assertRaises(NotImplementedError, lambda: (pots * pots)) self.assertDictEqual(pots * 2, potsx2) self.assertDictEqual(2 * pots, potsx2) # test division self.assertDictEqual(potsx2 / 2, pots) self.assertRaises(NotImplementedError, lambda: (pots / pots)) self.assertRaises(NotImplementedError, lambda: (pots / feo2)) # test add/subtract self.assertDictEqual(pots + pots, potsx2) self.assertDictEqual(potsx2 - pots, pots) self.assertDictEqual(fepot + opot, pots) self.assertDictEqual(fepot - opot, pots - opot - opot) if __name__ == "__main__": unittest.main()
mbkumar/pymatgen
pymatgen/core/tests/test_composition.py
Python
mit
25,224
[ "pymatgen" ]
ad54531e2ea07096fbd4959f695936ff151034c2a7ea2f5fd53f6ebb0d9f1f09
#!/usr/bin/env python """ Creates graph of modeled time series from multiple files. See figures in the "Getting Started" section of the User's Manual. """ # example from grid-sequencing example: # $ ./tsshow.py ice_volume_glacierized ice_volume_glacierized-gridseq.png ts_g20km_10ka_hy.nc '20 km for 10 ka' ts_g10km_gridseq.nc '10 km for 2 ka' ts_g5km_gridseq.nc '5 km for 200 a' # example from paramstudy/: # $ ../tsshow.py ice_volume_glacierized ice_volume_glacierized-param.png ts_p10km_q0.1_e1.nc '(0.1,1)' ts_p10km_q0.5_e1.nc '(0.5,1)' ts_p10km_q1.0_e1.nc '(1.0,1)' ts_p10km_q0.1_e3.nc '(0.1,3)' ts_p10km_q0.5_e3.nc '(0.5,3)' ts_p10km_q1.0_e3.nc '(1.0,3)' ts_p10km_q0.1_e6.nc '(0.1,6)' ts_p10km_q0.5_e6.nc '(0.5,6)' ts_p10km_q1.0_e6.nc '(1.0,6)' from numpy import * import pylab as plt import sys try: import netCDF4 as netCDF except: print "netCDF4 is not installed!" sys.exit(1) NC = netCDF.Dataset if len(sys.argv) < 5: print "tsshow.py ERROR: at least 4 arguments needed" print "usage:" print print " $ python tsshow.py FIELD OUTIMAGE TSFILE1 LABEL1 ... TSFILEn LABELn" print print "where strings LABEL1 ... LABELn go in the legend" print "example:" print " $ python tsshow.py ice_volume_glacierized foo.png ts_g20km.nc '20 km' ts_g10km.nc '10 km'" sys.exit(1) field = sys.argv[1] outimage = sys.argv[2] legloc = 'lower right' secpera = 31556926.0 vfactor = 1.0e6 * 1.0e9 n = (len(sys.argv) - 3) / 2 labels = [] plt.figure(figsize=(9, 4)) style = ['b-', 'g-', 'r-', 'c-', 'm-', 'y-', 'k-', 'b--', 'g--', 'r--', 'c--', 'm--', 'y--', 'k--'] for k in range(n): tsfile = sys.argv[2 * k + 3] labels.append(sys.argv[2 * k + 4]) try: ncfile = NC(tsfile, "r") except: print "ERROR: can't read from file %s ..." % tsfile sys.exit(2) t = ncfile.variables["time"][:] / secpera var = ncfile.variables[field][:] ncfile.close() print "read variable '%s' from time-series file '%s' ..." 
% (field, tsfile) plt.plot(t, var / vfactor, style[k], linewidth=2.5) # automatic colors; default order # blue, green, red, cyan, magenta, ... plt.hold(True) plt.hold(False) plt.legend(labels, loc=legloc) plt.xlabel("t (years)", size=16) plt.ylabel("%s ($10^6$ km$^3$)" % field, size=16) plt.grid(True) print "saving image to file '%s' ..." % outimage # plt.show() plt.savefig(outimage, bbox_inches='tight')
talbrecht/pism_pik
examples/std-greenland/tsshow.py
Python
gpl-3.0
2,444
[ "NetCDF" ]
e19d2ad4e6adff3322241930ac771f508799f444e1191fae42251ea8e1bace40
# -*- coding: utf-8 -*-
"""QGIS Unit tests for edit widgets.

.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option) any
later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '28/11/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

import qgis
import os

from qgis.core import (
    QgsFeature,
    QgsGeometry,
    QgsPoint,
    QgsVectorLayer,
    NULL,
    QgsProject,
    QgsRelation,
    QgsMapLayerRegistry,
    QgsTransaction,
    QgsFeatureRequest
)
from qgis.gui import (
    QgsEditorWidgetRegistry,
    QgsRelationWidgetWrapper,
    QgsAttributeEditorContext,
    QgsVectorLayerTools,
    QgsFeatureListView
)
from PyQt.QtCore import (
    QTimer
)
from PyQt.QtWidgets import (
    QWidget,
    QToolButton,
    QTableView,
    QListView
)
from PyQt.QtGui import (
    QApplication
)
from time import sleep
from utilities import (unitTestDataPath,
                       getQgisTestApp,
                       TestCase,
                       unittest,
                       expectedFailure
                       )

# Shared QGIS application instance for every test in this module.
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()


# NOTE(review): class name mentions "TextEditWidget" but these are relation
# edit widget tests (books n:m authors via a link table on a PostgreSQL
# test database) — consider renaming to match the file.
class TestQgsTextEditWidget(TestCase):

    @classmethod
    def setUpClass(cls):
        """
        Setup the involved layers and relations for a n:m relation
        :return:
        """
        QgsEditorWidgetRegistry.initEditors()
        # Default connection; overridable via QGIS_PGTEST_DB for CI setups.
        cls.dbconn = u'dbname=\'qgis_test\' host=localhost port=5432 user=\'postgres\' password=\'postgres\''
        if 'QGIS_PGTEST_DB' in os.environ:
            cls.dbconn = os.environ['QGIS_PGTEST_DB']
        # Create test layer
        cls.vl_b = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books" sql=', 'test', 'postgres')
        cls.vl_a = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."authors" sql=', 'test', 'postgres')
        cls.vl_link = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books_authors" sql=', 'test', 'postgres')

        QgsMapLayerRegistry.instance().addMapLayer(cls.vl_b)
        QgsMapLayerRegistry.instance().addMapLayer(cls.vl_a)
        QgsMapLayerRegistry.instance().addMapLayer(cls.vl_link)

        relMgr = QgsProject.instance().relationManager()

        # rel_a: link table -> authors; rel_b: link table -> books.
        cls.rel_a = QgsRelation()
        cls.rel_a.setReferencingLayer(cls.vl_link.id())
        cls.rel_a.setReferencedLayer(cls.vl_a.id())
        cls.rel_a.addFieldPair('fk_author', 'pk')
        cls.rel_a.setRelationId('rel_a')
        assert(cls.rel_a.isValid())
        relMgr.addRelation(cls.rel_a)

        cls.rel_b = QgsRelation()
        cls.rel_b.setReferencingLayer(cls.vl_link.id())
        cls.rel_b.setReferencedLayer(cls.vl_b.id())
        cls.rel_b.addFieldPair('fk_book', 'pk')
        cls.rel_b.setRelationId('rel_b')
        assert(cls.rel_b.isValid())
        relMgr.addRelation(cls.rel_b)

        # Our mock QgsVectorLayerTools, that allow to inject data where user input is expected
        cls.vltools = VlTools()

        assert(cls.vl_a.isValid())
        assert(cls.vl_b.isValid())
        assert(cls.vl_link.isValid())

    def setUp(self):
        # Each test runs inside its own transaction (rolled back in tearDown).
        self.startTransaction()

    def tearDown(self):
        self.rollbackTransaction()

    def test_delete_feature(self):
        """
        Check if a feature can be deleted properly
        """
        self.createWrapper(self.vl_a, '"name"=\'Erich Gamma\'')

        self.assertEquals(self.table_view.model().rowCount(), 1)

        self.assertEquals(1, len([f for f in self.vl_b.getFeatures()]))

        # NOTE(review): iterator .next() is Python 2 only; Python 3 would
        # need next(...) here and at the other call sites below.
        fid = self.vl_b.getFeatures(QgsFeatureRequest().setFilterExpression('"name"=\'Design Patterns. Elements of Reusable Object-Oriented Software\'')).next().id()

        self.widget.featureSelectionManager().select([fid])

        btn = self.widget.findChild(QToolButton, 'mDeleteFeatureButton')
        btn.click()

        # This is the important check that the feature is deleted
        self.assertEquals(0, len([f for f in self.vl_b.getFeatures()]))

        # This is actually more checking that the database on delete action is properly set on the relation
        self.assertEquals(0, len([f for f in self.vl_link.getFeatures()]))

        self.assertEquals(self.table_view.model().rowCount(), 0)

    def test_list(self):
        """
        Simple check if several related items are shown
        """
        wrapper = self.createWrapper(self.vl_b)

        self.assertEquals(self.table_view.model().rowCount(), 4)

    @expectedFailure
    def test_add_feature(self):
        """
        Check if a new related feature is added
        """
        self.createWrapper(self.vl_a, '"name"=\'Douglas Adams\'')

        self.assertEquals(self.table_view.model().rowCount(), 0)

        self.vltools.setValues([None, 'The Hitchhiker\'s Guide to the Galaxy'])
        btn = self.widget.findChild(QToolButton, 'mAddFeatureButton')
        btn.click()

        # Book entry has been created
        self.assertEquals(2, len([f for f in self.vl_b.getFeatures()]))

        # Link entry has been created
        self.assertEquals(5, len([f for f in self.vl_link.getFeatures()]))

        self.assertEquals(self.table_view.model().rowCount(), 1)

    def test_link_feature(self):
        """
        Check if an existing feature can be linked
        """
        wrapper = self.createWrapper(self.vl_a, '"name"=\'Douglas Adams\'')

        f = QgsFeature(self.vl_b.fields())
        f.setAttributes([self.vl_b.dataProvider().defaultValue(0), 'The Hitchhiker\'s Guide to the Galaxy'])
        self.vl_b.addFeature(f)

        # Clicking the link button opens a modal dialog; this callback runs
        # from the event loop while that dialog is open and accepts it.
        def choose_linked_feature():
            dlg = QApplication.activeModalWidget()
            dlg.setSelectedFeatures([f.id()])
            dlg.accept()

        btn = self.widget.findChild(QToolButton, 'mLinkFeatureButton')

        timer = QTimer()
        timer.setSingleShot(True)
        timer.setInterval(0)  # will run in the event loop as soon as it's processed when the dialog is opened
        timer.timeout.connect(choose_linked_feature)
        timer.start()

        btn.click()
        # magically the above code selects the feature here...

        link_feature = self.vl_link.getFeatures(QgsFeatureRequest().setFilterExpression('"fk_book"={}'.format(f[0]))).next()
        self.assertIsNotNone(link_feature[0])

        self.assertEquals(self.table_view.model().rowCount(), 1)

    @expectedFailure
    def test_unlink_feature(self):
        """
        Check if a linked feature can be unlinked
        """
        wrapper = self.createWrapper(self.vl_b)

        wdg = wrapper.widget()

        # All authors are listed
        self.assertEquals(self.table_view.model().rowCount(), 4)

        it = self.vl_a.getFeatures(
            QgsFeatureRequest().setFilterExpression('"name" IN (\'Richard Helm\', \'Ralph Johnson\')'))

        self.widget.featureSelectionManager().select([f.id() for f in it])

        btn = self.widget.findChild(QToolButton, 'mUnlinkFeatureButton')
        btn.click()

        # This is actually more checking that the database on delete action is properly set on the relation
        self.assertEquals(2, len([f for f in self.vl_link.getFeatures()]))

        self.assertEquals(2, self.table_view.model().rowCount())

    def startTransaction(self):
        """
        Start a new transaction and set all layers into transaction mode.

        :return: None
        """
        lyrs = [self.vl_a, self.vl_b, self.vl_link]

        self.transaction = QgsTransaction.create([l.id() for l in lyrs])
        self.transaction.begin()
        for l in lyrs:
            l.startEditing()

    def rollbackTransaction(self):
        """
        Rollback all changes done in this transaction.
        We always rollback and never commit to have the database in a pristine
        state at the end of each test.

        :return: None
        """
        lyrs = [self.vl_a, self.vl_b, self.vl_link]
        for l in lyrs:
            l.commitChanges()
        self.transaction.rollback()

    def createWrapper(self, layer, filter=None):
        """
        Basic setup of a relation widget wrapper.
        Will create a new wrapper and set its feature to the one and only book
        in the table.
        It will also assign some instance variables to help

        * self.widget The created widget
        * self.table_view The table view of the widget

        :param layer: the referenced layer whose relation editor is built
        :param filter: optional feature filter expression selecting the
                       feature the wrapper is set to
        :return: The created wrapper
        """
        # Pick the relation pair depending on which side of the n:m we edit.
        if layer == self.vl_b:
            relation = self.rel_b
            nmrel = self.rel_a
        else:
            relation = self.rel_a
            nmrel = self.rel_b

        parent = QWidget()
        self.wrapper = QgsRelationWidgetWrapper(layer, relation)
        self.wrapper.setConfig({'nm-rel': nmrel.id()})
        context = QgsAttributeEditorContext()
        context.setVectorLayerTools(self.vltools)
        self.wrapper.setContext(context)

        self.widget = self.wrapper.widget()
        self.widget.show()

        request = QgsFeatureRequest()
        if filter:
            request.setFilterExpression(filter)
        book = layer.getFeatures(request).next()
        self.wrapper.setFeature(book)

        self.table_view = self.widget.findChild(QTableView)
        return self.wrapper


class VlTools(QgsVectorLayerTools):

    """
    Mock the QgsVectorLayerTools
    Since we don't have a user on the test server to input this data for us,
    we can just use this.
    """

    def setValues(self, values):
        """
        Set the values for the next feature to insert
        :param values: An array of values that shall be used for the next inserted record
        :return: None
        """
        self.values = values

    def addFeature(self, layer, defaultValues, defaultGeometry):
        """
        Overrides the addFeature method
        :param layer: vector layer
        :param defaultValues: some default values that may be provided by QGIS
        :param defaultGeometry: a default geometry that may be provided by QGIS
        :return: tuple(ok, f) where ok is if the layer added the feature and f is the added feature
        """
        # Fill falsy entries (e.g. None primary keys) with provider defaults.
        # NOTE(review): 'values' is computed but f.setAttributes uses
        # self.values — presumably intentional for these tests; verify.
        values = list()
        for i, v in enumerate(self.values):
            if v:
                values.append(v)
            else:
                values.append(layer.dataProvider().defaultValue(i))
        f = QgsFeature(layer.fields())
        f.setAttributes(self.values)
        f.setGeometry(defaultGeometry)
        ok = layer.addFeature(f)

        return ok, f

    def startEditing(self, layer):
        pass

    def stopEditing(self, layer, allowCancel):
        pass

    def saveEdits(self, layer):
        pass


if __name__ == '__main__':
    unittest.main()
supergis/QGIS
tests/src/python/test_qgsrelationeditwidget.py
Python
gpl-2.0
10,897
[ "Galaxy" ]
066f5b76e94591ec0801dfbd38049d0089a7c2f419b47b13f29ba7ee5c415d28
from django.test import TestCase
from push_notifications.api.rest_framework import APNSDeviceSerializer, GCMDeviceSerializer
from rest_framework.serializers import ValidationError

from tests.mock_responses import GCM_DRF_INVALID_HEX_ERROR, GCM_DRF_OUT_OF_RANGE_ERROR


class APNSDeviceSerializerTestCase(TestCase):
    def test_validation(self):
        """APNS tokens of 32 or 100 bytes, upper- or lower-case hex, validate."""
        good_payloads = (
            # (registration token, device id)
            ("AE" * 32, "F" * 32),    # 32 bytes, upper case
            ("ae" * 32, "f" * 32),    # 32 bytes, lower case
            ("AE" * 100, "F" * 32),   # 100 bytes, upper case
            ("ae" * 100, "f" * 32),   # 100 bytes, lower case
        )
        for token, device in good_payloads:
            serializer = APNSDeviceSerializer(data={
                "registration_id": token,
                "name": "Apple iPhone 6+",
                "device_id": device,
            })
            self.assertTrue(serializer.is_valid())

        # Non-hex token plus malformed UUID: both fields must carry their
        # own error message.
        serializer = APNSDeviceSerializer(data={
            "registration_id": "invalid device token contains no hex",
            "name": "Apple iPhone 6+",
            "device_id": "ffffffffffffffffffffffffffffake",
        })
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors["device_id"][0],
            '"ffffffffffffffffffffffffffffake" is not a valid UUID.')
        self.assertEqual(
            serializer.errors["registration_id"][0],
            "Registration ID (device token) is invalid")


class GCMDeviceSerializerTestCase(TestCase):
    @staticmethod
    def _build(instance=None, **fields):
        """Build a GCMDeviceSerializer around the standard test payload."""
        payload = {"registration_id": "foobar", "name": "Galaxy Note 3"}
        payload.update(fields)
        if instance is None:
            return GCMDeviceSerializer(data=payload)
        return GCMDeviceSerializer(instance, data=payload)

    def test_device_id_validation_pass(self):
        serializer = self._build(device_id="0x1031af3b")
        self.assertTrue(serializer.is_valid())

    def test_registration_id_unique(self):
        """Validate that a duplicate registration id raises a validation error."""
        # add a device
        serializer = self._build(device_id="0x1031af3b")
        serializer.is_valid(raise_exception=True)
        device = serializer.save()

        # ensure updating the same object works
        serializer = self._build(instance=device, name="Galaxy Note 5",
                                 device_id="0x1031af3b")
        serializer.is_valid(raise_exception=True)
        device = serializer.save()

        # try to add a new device with the same token
        serializer = self._build(device_id="0xdeadbeaf")
        with self.assertRaises(ValidationError) as ex:
            serializer.is_valid(raise_exception=True)
        self.assertEqual({'registration_id': [u'This field must be unique.']},
                         ex.exception.detail)

    def test_device_id_validation_fail_bad_hex(self):
        serializer = self._build(device_id="0x10r")
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, GCM_DRF_INVALID_HEX_ERROR)

    def test_device_id_validation_fail_out_of_range(self):
        serializer = self._build(device_id="10000000000000000")  # 2**64
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, GCM_DRF_OUT_OF_RANGE_ERROR)

    def test_device_id_validation_value_between_signed_unsigned_64b_int_maximums(self):
        """ 2**63 < 0xe87a4e72d634997c < 2**64 """
        serializer = self._build(name="Nexus 5", device_id="e87a4e72d634997c")
        self.assertTrue(serializer.is_valid())
gkirkpatrick/django-push-notifications
tests/test_rest_framework.py
Python
mit
4,471
[ "Galaxy" ]
d7a4f007fb09050a67a180a61579654eb61c1f54d3b5359702a6c0d2594ff1e8
""" Test class for agents """ # imports import unittest import json import os from DIRAC.WorkloadManagementSystem.PilotAgent.pilotTools import PilotParams, CommandBase from DIRAC.WorkloadManagementSystem.PilotAgent.pilotCommands import GetPilotVersion class PilotTestCase( unittest.TestCase ): """ Base class for the Agents test cases """ def setUp( self ): self.pp = PilotParams() def tearDown( self ): try: os.remove('pilot.out') os.remove( 'pilot.json' ) os.remove( 'pilot.json-local' ) except OSError: pass class CommandsTestCase( PilotTestCase ): def test_commandBase(self): cb = CommandBase(self.pp) returnCode, _outputData = cb.executeAndGetOutput("ls") self.assertEqual(returnCode, 0) def test_GetPilotVersion( self ): # Now defining a local file for test, and all the necessary parameters fp = open( 'pilot.json', 'w' ) json.dump( {'TestSetup':{'Version':['v1r1', 'v2r2']}}, fp ) fp.close() self.pp.setup = 'TestSetup' self.pp.pilotCFGFileLocation = 'file://%s' % os.getcwd() gpv = GetPilotVersion( self.pp ) self.assertIsNone( gpv.execute() ) self.assertEqual( gpv.pp.releaseVersion, 'v1r1' ) ############################################################################# # Test Suite run ############################################################################# if __name__ == '__main__': suite = unittest.defaultTestLoader.loadTestsFromTestCase( PilotTestCase ) suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( CommandsTestCase ) ) testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite ) # EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
andresailer/DIRAC
WorkloadManagementSystem/PilotAgent/test/Test_Pilot.py
Python
gpl-3.0
1,723
[ "DIRAC" ]
c40ab9aa448afed6d122fba77d2161b9e553d3f4089a97bb99a2e896b612b63c
from tests.test_helper import * class TestCreditCard(unittest.TestCase): def test_create_adds_credit_card_to_existing_customer(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }) self.assertTrue(result.is_success) credit_card = result.credit_card self.assertTrue(re.search("\A\w{4,5}\Z", credit_card.token) != None) self.assertEquals("411111", credit_card.bin) self.assertEquals("1111", credit_card.last_4) self.assertEquals("05", credit_card.expiration_month) self.assertEquals("2009", credit_card.expiration_year) self.assertEquals("05/2009", credit_card.expiration_date) self.assertEquals("John Doe", credit_card.cardholder_name) def test_create_and_make_default(self): customer = Customer.create().customer card1 = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }).credit_card self.assertTrue(card1.default) card2 = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe", "options": {"make_default": True} }).credit_card card1 = CreditCard.find(card1.token) self.assertFalse(card1.default) self.assertTrue(card2.default) def test_create_with_expiration_month_and_year(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_month": "05", "expiration_year": "2009", "cvv": "100", "cardholder_name": "John Doe" }) self.assertTrue(result.is_success) credit_card = result.credit_card self.assertEquals("05/2009", credit_card.expiration_date) def test_create_can_specify_the_desired_token(self): token = str(random.randint(1, 1000000)) customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": 
"4111111111111111", "expiration_date": "05/2009", "token": token }) self.assertTrue(result.is_success) credit_card = result.credit_card self.assertEquals(token, credit_card.token) def test_create_with_billing_address(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "billing_address": { "street_address": "123 Abc Way", "locality": "Chicago", "region": "Illinois", "postal_code": "60622", "country_code_alpha2": "MX", "country_code_alpha3": "MEX", "country_code_numeric": "484", "country_name": "Mexico" } }) self.assertTrue(result.is_success) address = result.credit_card.billing_address self.assertEquals("123 Abc Way", address.street_address) self.assertEquals("Chicago", address.locality) self.assertEquals("Illinois", address.region) self.assertEquals("60622", address.postal_code) self.assertEquals("MX", address.country_code_alpha2) self.assertEquals("MEX", address.country_code_alpha3) self.assertEquals("484", address.country_code_numeric) self.assertEquals("Mexico", address.country_name) def test_create_with_billing_address_id(self): customer = Customer.create().customer address = Address.create({ "customer_id": customer.id, "street_address": "123 Abc Way" }).address result = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "billing_address_id": address.id }) self.assertTrue(result.is_success) billing_address = result.credit_card.billing_address self.assertEquals(address.id, billing_address.id) self.assertEquals("123 Abc Way", billing_address.street_address) def test_create_without_billing_address_still_has_billing_address_method(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", }) self.assertTrue(result.is_success) self.assertEquals(None, result.credit_card.billing_address) def 
test_create_with_card_verification(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4000111111111115", "expiration_date": "05/2009", "options": {"verify_card": True} }) self.assertFalse(result.is_success) verification = result.credit_card_verification self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, verification.status) self.assertEquals("2000", verification.processor_response_code) self.assertEquals("Do Not Honor", verification.processor_response_text) self.assertEquals("I", verification.cvv_response_code) self.assertEquals(None, verification.avs_error_response_code) self.assertEquals("I", verification.avs_postal_code_response_code) self.assertEquals("I", verification.avs_street_address_response_code) self.assertEquals(TestHelper.default_merchant_account_id, verification.merchant_account_id) def test_create_with_card_verification_and_non_default_merchant_account(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4000111111111115", "expiration_date": "05/2009", "options": { "verification_merchant_account_id": TestHelper.non_default_merchant_account_id, "verify_card": True } }) self.assertFalse(result.is_success) verification = result.credit_card_verification self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, verification.status) self.assertEquals(None, verification.gateway_rejection_reason) self.assertEquals(TestHelper.non_default_merchant_account_id, verification.merchant_account_id) def test_verify_gateway_rejected_responds_to_processor_response_code(self): old_merchant_id = Configuration.merchant_id old_public_key = Configuration.public_key old_private_key = Configuration.private_key try: Configuration.merchant_id = "processing_rules_merchant_id" Configuration.public_key = "processing_rules_public_key" Configuration.private_key = "processing_rules_private_key" customer = Customer.create().customer result 
= CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "billing_address": { "postal_code": "20000" }, "options": { "verify_card": True } }) self.assertFalse(result.is_success) self.assertEquals('1000', result.credit_card_verification.processor_response_code) self.assertEquals('Approved', result.credit_card_verification.processor_response_text) finally: Configuration.merchant_id = old_merchant_id Configuration.public_key = old_public_key Configuration.private_key = old_private_key def test_expose_gateway_rejection_reason_on_verification(self): old_merchant_id = Configuration.merchant_id old_public_key = Configuration.public_key old_private_key = Configuration.private_key try: Configuration.merchant_id = "processing_rules_merchant_id" Configuration.public_key = "processing_rules_public_key" Configuration.private_key = "processing_rules_private_key" customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "200", "options": { "verify_card": True } }) self.assertFalse(result.is_success) verification = result.credit_card_verification self.assertEquals(Transaction.GatewayRejectionReason.Cvv, verification.gateway_rejection_reason) finally: Configuration.merchant_id = old_merchant_id Configuration.public_key = old_public_key Configuration.private_key = old_private_key def test_create_with_card_verification_set_to_false(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4000111111111115", "expiration_date": "05/2009", "options": {"verify_card": False} }) self.assertTrue(result.is_success) def test_create_with_invalid_invalid_options(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "invalid_date", }) self.assertFalse(result.is_success) 
self.assertEquals(ErrorCodes.CreditCard.ExpirationDateIsInvalid, result.errors.for_object("credit_card").on("expiration_date")[0].code) self.assertEquals("Expiration date is invalid.", result.message) def test_create_with_invalid_country_codes(self): customer = Customer.create().customer result = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2012", "billing_address": { "country_code_alpha2": "ZZ", "country_code_alpha3": "ZZZ", "country_code_numeric": "000", "country_name": "zzzzzzz" } }) self.assertFalse(result.is_success) self.assertEquals( ErrorCodes.Address.CountryCodeAlpha2IsNotAccepted, result.errors.for_object("credit_card").for_object("billing_address").on("country_code_alpha2")[0].code ) self.assertEquals( ErrorCodes.Address.CountryCodeAlpha3IsNotAccepted, result.errors.for_object("credit_card").for_object("billing_address").on("country_code_alpha3")[0].code ) self.assertEquals( ErrorCodes.Address.CountryCodeNumericIsNotAccepted, result.errors.for_object("credit_card").for_object("billing_address").on("country_code_numeric")[0].code ) self.assertEquals( ErrorCodes.Address.CountryNameIsNotAccepted, result.errors.for_object("credit_card").for_object("billing_address").on("country_name")[0].code ) def test_update_with_valid_options(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }).credit_card result = CreditCard.update(credit_card.token, { "number": "5105105105105100", "expiration_date": "06/2010", "cvv": "123", "cardholder_name": "Jane Jones" }) self.assertTrue(result.is_success) credit_card = result.credit_card self.assertTrue(re.search("\A\w{4,5}\Z", credit_card.token) != None) self.assertEquals("510510", credit_card.bin) self.assertEquals("5100", credit_card.last_4) self.assertEquals("06", credit_card.expiration_month) 
self.assertEquals("2010", credit_card.expiration_year) self.assertEquals("06/2010", credit_card.expiration_date) self.assertEquals("Jane Jones", credit_card.cardholder_name) def test_update_billing_address_creates_new_by_default(self): customer = Customer.create().customer initial_credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "billing_address": { "street_address": "123 Nigeria Ave", } }).credit_card updated_credit_card = CreditCard.update(initial_credit_card.token, { "billing_address": { "region": "IL", "country_code_alpha2": "NG", "country_code_alpha3": "NGA", "country_code_numeric": "566", "country_name": "Nigeria" } }).credit_card self.assertEquals("IL", updated_credit_card.billing_address.region) self.assertEquals("NG", updated_credit_card.billing_address.country_code_alpha2) self.assertEquals("NGA", updated_credit_card.billing_address.country_code_alpha3) self.assertEquals("566", updated_credit_card.billing_address.country_code_numeric) self.assertEquals("Nigeria", updated_credit_card.billing_address.country_name) self.assertEquals(None, updated_credit_card.billing_address.street_address) self.assertNotEquals(initial_credit_card.billing_address.id, updated_credit_card.billing_address.id) def test_update_billing_address_when_update_existing_is_True(self): customer = Customer.create().customer initial_credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "billing_address": { "street_address": "123 Nigeria Ave", } }).credit_card updated_credit_card = CreditCard.update(initial_credit_card.token, { "billing_address": { "region": "IL", "options": { "update_existing": True } } }).credit_card self.assertEquals("IL", updated_credit_card.billing_address.region) self.assertEquals("123 Nigeria Ave", updated_credit_card.billing_address.street_address) self.assertEquals(initial_credit_card.billing_address.id, 
updated_credit_card.billing_address.id) def test_update_and_make_default(self): customer = Customer.create().customer card1 = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }).credit_card card2 = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }).credit_card self.assertTrue(card1.default) self.assertFalse(card2.default) result = CreditCard.update(card2.token, { "options": { "make_default": True } }) self.assertFalse(CreditCard.find(card1.token).default) self.assertTrue(CreditCard.find(card2.token).default) def test_update_verifies_card_if_option_is_provided(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }).credit_card result = CreditCard.update(credit_card.token, { "number": "4000111111111115", "expiration_date": "06/2010", "cvv": "123", "cardholder_name": "Jane Jones", "options": {"verify_card": True} }) self.assertFalse(result.is_success) self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, result.credit_card_verification.status) def test_update_verifies_card_with_non_default_merchant_account(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }).credit_card result = CreditCard.update(credit_card.token, { "number": "4000111111111115", "expiration_date": "06/2010", "cvv": "123", "cardholder_name": "Jane Jones", "options": { "verification_merchant_account_id": TestHelper.non_default_merchant_account_id, "verify_card": True } }) self.assertFalse(result.is_success) self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, 
result.credit_card_verification.status) def test_update_billing_address(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "billing_address": { "street_address": "321 Xyz Way", "locality": "Chicago", "region": "Illinois", "postal_code": "60621" } }).credit_card result = CreditCard.update(credit_card.token, { "billing_address": { "street_address": "123 Abc Way", "locality": "Chicago", "region": "Illinois", "postal_code": "60622" } }) self.assertTrue(result.is_success) address = result.credit_card.billing_address self.assertEquals("123 Abc Way", address.street_address) self.assertEquals("Chicago", address.locality) self.assertEquals("Illinois", address.region) self.assertEquals("60622", address.postal_code) def test_update_returns_error_if_invalid(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009" }).credit_card result = CreditCard.update(credit_card.token, { "expiration_date": "invalid_date" }) self.assertFalse(result.is_success) self.assertEquals(ErrorCodes.CreditCard.ExpirationDateIsInvalid, result.errors.for_object("credit_card").on("expiration_date")[0].code) def test_delete_with_valid_token(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009" }).credit_card result = CreditCard.delete(credit_card.token) self.assertTrue(result.is_success) @raises(NotFoundError) def test_delete_raises_error_when_deleting_twice(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009" }).credit_card CreditCard.delete(credit_card.token) CreditCard.delete(credit_card.token) @raises(NotFoundError) def test_delete_with_invalid_token(self): 
result = CreditCard.delete("notreal") def test_find_with_valid_token(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009" }).credit_card found_credit_card = CreditCard.find(credit_card.token) self.assertTrue(re.search("\A\w{4,5}\Z", credit_card.token) != None) self.assertEquals("411111", credit_card.bin) self.assertEquals("1111", credit_card.last_4) self.assertEquals("05", credit_card.expiration_month) self.assertEquals("2009", credit_card.expiration_year) self.assertEquals("05/2009", credit_card.expiration_date) def test_find_returns_associated_subsriptions(self): customer = Customer.create().customer credit_card = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009" }).credit_card id = "id_" + str(random.randint(1, 1000000)) subscription = Subscription.create({ "id": id, "plan_id": "integration_trialless_plan", "payment_method_token": credit_card.token, "price": Decimal("1.00") }).subscription found_credit_card = CreditCard.find(credit_card.token) self.assertEquals(id, found_credit_card.subscriptions[0].id) self.assertEquals(Decimal("1.00"), found_credit_card.subscriptions[0].price) self.assertEquals(credit_card.token, found_credit_card.subscriptions[0].payment_method_token) def test_find_with_invalid_token(self): try: CreditCard.find("bad_token") self.assertTrue(False) except Exception, e: self.assertEquals("payment method with token bad_token not found", str(e)) def test_create_from_transparent_redirect(self): customer = Customer.create().customer tr_data = { "credit_card": { "customer_id": customer.id } } post_params = { "tr_data": CreditCard.tr_data_for_create(tr_data, "http://example.com/path?foo=bar"), "credit_card[cardholder_name]": "Card Holder", "credit_card[number]": "4111111111111111", "credit_card[expiration_date]": "05/2012", "credit_card[billing_address][country_code_alpha2]": "MX", 
"credit_card[billing_address][country_code_alpha3]": "MEX", "credit_card[billing_address][country_code_numeric]": "484", "credit_card[billing_address][country_name]": "Mexico", } query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_create_url()) result = CreditCard.confirm_transparent_redirect(query_string) self.assertTrue(result.is_success) credit_card = result.credit_card self.assertEquals("411111", credit_card.bin) self.assertEquals("1111", credit_card.last_4) self.assertEquals("05", credit_card.expiration_month) self.assertEquals("2012", credit_card.expiration_year) self.assertEquals(customer.id, credit_card.customer_id) self.assertEquals("MX", credit_card.billing_address.country_code_alpha2) self.assertEquals("MEX", credit_card.billing_address.country_code_alpha3) self.assertEquals("484", credit_card.billing_address.country_code_numeric) self.assertEquals("Mexico", credit_card.billing_address.country_name) def test_create_from_transparent_redirect_and_make_default(self): customer = Customer.create().customer card1 = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }).credit_card self.assertTrue(card1.default) tr_data = { "credit_card": { "customer_id": customer.id, "options": { "make_default": True } } } post_params = { "tr_data": CreditCard.tr_data_for_create(tr_data, "http://example.com/path?foo=bar"), "credit_card[cardholder_name]": "Card Holder", "credit_card[number]": "4111111111111111", "credit_card[expiration_date]": "05/2012", } query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_create_url()) card2 = CreditCard.confirm_transparent_redirect(query_string).credit_card self.assertFalse(CreditCard.find(card1.token).default) self.assertTrue(card2.default) def test_create_from_transparent_redirect_with_error_result(self): customer = Customer.create().customer tr_data = { 
"credit_card": { "customer_id": customer.id } } post_params = { "tr_data": CreditCard.tr_data_for_create(tr_data, "http://example.com/path"), "credit_card[cardholder_name]": "Card Holder", "credit_card[number]": "eleventy", "credit_card[expiration_date]": "y2k" } query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_create_url()) result = CreditCard.confirm_transparent_redirect(query_string) self.assertFalse(result.is_success) self.assertEquals( ErrorCodes.CreditCard.NumberHasInvalidLength, result.errors.for_object("credit_card").on("number")[0].code ) self.assertEquals( ErrorCodes.CreditCard.ExpirationDateIsInvalid, result.errors.for_object("credit_card").on("expiration_date")[0].code ) def test_update_from_transparent_redirect_with_successful_result(self): old_token = str(random.randint(1, 1000000)) new_token = str(random.randint(1, 1000000)) credit_card = Customer.create({ "credit_card": { "cardholder_name": "Old Cardholder Name", "number": "4111111111111111", "expiration_date": "05/2012", "token": old_token } }).customer.credit_cards[0] tr_data = { "payment_method_token": old_token, "credit_card": { "token": new_token } } post_params = { "tr_data": CreditCard.tr_data_for_update(tr_data, "http://example.com/path"), "credit_card[cardholder_name]": "New Cardholder Name", "credit_card[expiration_date]": "05/2014" } query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_update_url()) result = CreditCard.confirm_transparent_redirect(query_string) self.assertTrue(result.is_success) credit_card = result.credit_card self.assertEquals(new_token, credit_card.token) self.assertEquals("411111", credit_card.bin) self.assertEquals("1111", credit_card.last_4) self.assertEquals("05", credit_card.expiration_month) self.assertEquals("2014", credit_card.expiration_year) def test_update_from_transparent_redirect_and_make_default(self): customer = Customer.create({ "credit_card": { "number": 
"4111111111111111", "expiration_date": "05/2012" } }).customer card1 = customer.credit_cards[0] card2 = CreditCard.create({ "customer_id": customer.id, "number": "4111111111111111", "expiration_date": "05/2009", }).credit_card self.assertTrue(card1.default) self.assertFalse(card2.default) tr_data = { "payment_method_token": card2.token, "credit_card": { "options": { "make_default": True } } } post_params = { "tr_data": CreditCard.tr_data_for_update(tr_data, "http://example.com/path"), "credit_card[cardholder_name]": "New Cardholder Name", "credit_card[expiration_date]": "05/2014" } query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_update_url()) result = CreditCard.confirm_transparent_redirect(query_string) self.assertFalse(CreditCard.find(card1.token).default) self.assertTrue(CreditCard.find(card2.token).default) def test_update_from_transparent_redirect_and_update_existing_billing_address(self): customer = Customer.create({ "credit_card": { "number": "4111111111111111", "expiration_date": "05/2012", "billing_address": { "street_address": "123 Old St", "locality": "Chicago", "region": "Illinois", "postal_code": "60621" } } }).customer card = customer.credit_cards[0] tr_data = { "payment_method_token": card.token, "credit_card": { "billing_address": { "street_address": "123 New St", "locality": "Columbus", "region": "Ohio", "postal_code": "43215", "options": { "update_existing": True } } } } post_params = { "tr_data": CreditCard.tr_data_for_update(tr_data, "http://example.com/path") } query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_update_url()) result = CreditCard.confirm_transparent_redirect(query_string) self.assertEquals(1, len(Customer.find(customer.id).addresses)) updated_card = CreditCard.find(card.token) self.assertEquals("123 New St", updated_card.billing_address.street_address) self.assertEquals("Columbus", updated_card.billing_address.locality) 
self.assertEquals("Ohio", updated_card.billing_address.region) self.assertEquals("43215", updated_card.billing_address.postal_code) def test_update_from_transparent_redirect_with_error_result(self): old_token = str(random.randint(1, 1000000)) credit_card = Customer.create({ "credit_card": { "cardholder_name": "Old Cardholder Name", "number": "4111111111111111", "expiration_date": "05/2012", "token": old_token } }).customer.credit_cards[0] tr_data = { "payment_method_token": old_token, "credit_card": { "token": "bad token" } } post_params = { "tr_data": CreditCard.tr_data_for_update(tr_data, "http://example.com/path"), "credit_card[cardholder_name]": "New Cardholder Name", "credit_card[expiration_date]": "05/2014" } query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_update_url()) result = CreditCard.confirm_transparent_redirect(query_string) self.assertFalse(result.is_success) self.assertEquals( ErrorCodes.CreditCard.TokenInvalid, result.errors.for_object("credit_card").on("token")[0].code ) def test_expired_can_iterate_over_all_items(self): customer_id = Customer.all().first.id for i in range(110 - CreditCard.expired().maximum_size): CreditCard.create({ "customer_id": customer_id, "number": "4111111111111111", "expiration_date": "05/2009", "cvv": "100", "cardholder_name": "John Doe" }) collection = CreditCard.expired() self.assertTrue(collection.maximum_size > 100) credit_card_tokens = [credit_card.token for credit_card in collection.items] self.assertEquals(collection.maximum_size, len(TestHelper.unique(credit_card_tokens))) self.assertEquals(set([True]), TestHelper.unique([credit_card.is_expired for credit_card in collection.items])) def test_expiring_between(self): customer_id = Customer.all().first.id for i in range(110 - CreditCard.expiring_between(date(2010, 1, 1), date(2010, 12, 31)).maximum_size): CreditCard.create({ "customer_id": customer_id, "number": "4111111111111111", "expiration_date": "05/2010", "cvv": 
"100", "cardholder_name": "John Doe" }) collection = CreditCard.expiring_between(date(2010, 1, 1), date(2010, 12, 31)) self.assertTrue(collection.maximum_size > 100) credit_card_tokens = [credit_card.token for credit_card in collection.items] self.assertEquals(collection.maximum_size, len(TestHelper.unique(credit_card_tokens))) self.assertEquals(set(['2010']), TestHelper.unique([credit_card.expiration_year for credit_card in collection.items]))
eldarion/braintree_python
tests/integration/test_credit_card.py
Python
mit
33,947
[ "COLUMBUS" ]
7fdd48dce526ac15fb5df42e8f1fb309f7164f113bec6297ec7e209b788c0df3
import os def print_cli_help_message(): x = os.system("clear; clear;") print "\n\n\n<<<<<-------------------------COMMAND-LINE INTERFACE TO THIS MODULE------------------------->>>>>\n\n\tARGUMENTS LIST:" args_note=""" -d = database file path -d_t = table name in database -g = google domain [make sure its's http:// and not https://] -t = topic of query\n\t-i = single keyword in url -p = length of time_period (in days) -n = number of time periods -r = results per time period -m = max number of results per page -w_p = wait time between pages -w_s = wait time between searches -f = 'resume_from' date (in julian) """ print args_note+"\n\n\tEXAMPLE:" print ' $ python article_url_extract.py -d "GoogleSearchResults.db" -d_t "NetworkingCompaniesResearch" -g www.google.com -t "Cisco" -p 60 -n 2 -r 20 -w_p 180 -w_s 900' print "\n\n\n\n ###---------------------------------------------NOTES---------------------------------------------###\n\n" tp_note= """Increasing -p and decreasing -n to get more results: Possible problems you may encounter: > Your query is rather obscure, and is only returning 8 results out of the 70 you had requested per time_period. > Your query is returning a lot of results, but they are mostly repeats. > your query does not return any new results for the latest time_period. Solution #1: [steps] [1] double your time period (-p) and [2] halve the number of time periods (-n). (or any proportion which maintains the product of -n and -p) Explanation: 'time_period' and 'num_time_periods_remaining' (-p and -n respectively) ideally should not be changed after you have started extracting for a particular Topic. Concretely, their product should remain unchanged. e.g. suppose you've used: $ python article_url_extract.py -t "Cisco" -p 30 -n 48 -r 20 i.e. 
topic = Cisco time_period = 30 num_time_periods_remaining = 48 results_per_time_period = 70 Initially, this is well and good, and you should not have to run anything else (you may need to change your IP occasionally if you find that Google is blocking it). Make the following change to your command line arguments: -p 60 -n 24 i.e. time_period = 60 num_time_periods_remaining = 24 This will cause you to get more results per time_period, and the results will usually be ones you have not seen before. However, this trick only works if the product (time_period * num_time_periods_remaining) i.e. (-p * -n) stays the same across runs, as changing the product will change the total time over which you are collecting results. """ resume_note = """How to use -f (resume function): Possible problems you may encounter: > your query returns zero new results for the latest time_period, and then the program is terminated before the next run. > you think a particular time_period's urls were not captured correctly and you'd like to make another pass over the period. Solution #1: [steps] [1] use -f (resume_from) functionality to skip over time periods in later runs. Optionally: [1] double your time period (-p) and [2] halve the number of time periods (-n). Explanation: -f i.e. resume_from, is a command-line argument you can use to skip periods where zero UNIQUE urls were obtained. e.g. suppose you've used: $ python article_url_extract.py -t "Cisco" -p 30 -n 48 -r 20 i.e. topic = Cisco time_period = 30 num_time_periods_remaining = 48 results_per_time_period = 70 Suppose this query has already been running for some time, and the latest time_period was 250103-250133 (you should be able to find this in the search query printed to the command line). Suppose that for some reason, this returns zero UNIQUE results, e.g. all 70 out of 70 results were repeats. 
If the program exits at this point (which may happen because of some error or because you terminated it with Ctrl+C), it will resume from the period 250103-250133 again, which we know will return no unique resuls. To save time, we can skip over this period by setting resume_from = 250103 i.e. $ python article_url_extract.py -t "Cisco" -p 30 -n 48 -r 20 -f 250103 Now, we will continue getting results from the next period. You may also want to change the time period and number of time periods appropriately (See note: "Increasing -p and decreasing -n to get more results"). IMPORTANT: once you find a time period in which you DO get some UNIQUE urls, you should not use resume_from in subsequent runs. """ lotsa_results_note = """Getting LOTS of results in a friendly manner: This project was made to get lots of Google Search results in an automated manner. However, the purpose was never to spam or cheat Google Search, and the Google TOS explicitly outlaws screen scraping. That being said, I know what it's like to be a poor, broke college student who had to make a project that requires a lot of data but can't afford the Google Search licencing fees (if I remember, it was about $50 for 1000 results from Google's python API). There's no clear way to get around the fact that scraping violates Google's TOS. However, if you do it in a respectful way, that is not spamming, you should not have any problem collecting lots of data really quickly. If you do not heed this warning and spam anyway, you will probably get your IP blocked by Google, either temporarily for a few hours or (in case you don't back off) for a few days. Note: getting "blocked by Google" means your IP gets blocked when you try to run the script. In most cases, you can still use Google normally via your browser (unless you've REALLLY pissed them off). 
There are certain things you can do to reliably get LOTS of results with this application: [1] Set the wait time to reasonalbe limits: Personally, I've found a wait period of 180 seconds between pages (-w_p 180) and 900 seconds between searches (-w_s 900) NEVER gets my IP blocked, but this might be too slow for you. If you have lots of IP addresses at your disposal, you can make the wait time small and switch them quickly (switching IPs is not provided in this module). [2] Randomly select the wait time: The application does this automatically. Concretely, googlesearch.py uses the function do_some_waiting(), which sets wait_time = random.uniform(0.3*wait, 1.5*wait), i.e. between 1/3rd and 1.5 times the input wait time. This averages to something like 0.9 times the wait time. Once you factor in the time for writing to database, connecting to the browser, etc, your total wait time can between calculated as: Total wait time = (number of pages * wait per page) + (number of period * wait per search query) [where number of periods = number of queries]. [3] The -m argument: Like most Linux command-line arguments, -m is badly named. It stands for "max number of results per page". As you know, a normal Google Search result returns 10 results per page. However, you can change this by adding "&num=<some number>" to the end of a search result. This is essentially what -m does. Specifying -m is a great way to get a LOT of results. Like, a huuuuuge number of results. For example suppose your query is kind of obscure and has 1542 results. You realize that ALL of the results are important to you. You could either wait for 155 pages of results to load, or you could set "-m 100" and get the same in 16 page results. The max value of -m is 100. However, setting -m to 100 is a quick way to get your IP noticed, since only developers and hackers do it. If you have a few IP addresses to space, I would recommend it (while keeping the wait times to a reasonalbe limit or more). 
[4] Use Virtual machines: I personally found virtual machines a big help. Concretely, I used VirtualBox on which I initially created a single virtual machine, based on Lubuntu 15.10 (Lubuntu is Ubuntu but with a very light GUI) and about 600 MB of RAM. My host system was running Ubuntu 15.04. I set up the required Python modules (splinter+phantomjs or twill v0.9, jdcal and BeautifulSoup), and VPNOneClick. I then installed guest additions to VirtualBox [this was very annoying, took me almost a full day to get right], so that I could enable shared folders. I placed the entire code for googlesearch inside that single shared folder. Then, I turned on VPNOneClick, connected to a new IP, and ran the code. The results started getting saved in a database file inside the shared directory. Once I was sure that my Virtual machine was extracting Google Search results, I cloned the Virtual machine (I created linked clones, which meant that all the software is the same, and only the changes were saved separately; this meant incredible savings in disk space as compared to full clones). Each linked clone was about 5 MB, whereas the base Virtual machine was ~4 GB. All in all, I made 8 linked clones, which consumed 4.8 GB of RAM when they ran together. Each linked clone had access to the same shared directory and the same database file where all the results were stored, but I opened VPNOneClick on each VM and connected to a different IP. Then, I ran the same code files (in the shared folder) on each and every VM. I used two terminal windows per VM, all running queries on different topics (I could have run them on the same topic and used "-f" to start at particular locations, but I this would be difficult to keep track of). The result was that I was running 16 processes in parallel, on 8 different IP addresses, which was an 8-fold increase over what I had with no VMs. And none of my IPs got blocked by Google, because I was extracting respectfully, i.e. 
using arguments "-w_p 180 -w_s 900". This is the great thing about parallelism; if you plan it correctly, the performance increase is spectacular. [5] ***Use different IP addresses****: Any website you visit has access to your IP address; that's just how the IP networking protocol works. Google is no different; every query you send is tagged with your IP address, and if you start sending too many queries too fast, Google will know what's up and start blocking your IP. However, there's literally millions of IP addresses available. To tap into these, you need a VPN (Virtual Private Network), which lets you set up a connection to a PROXY SERVER. A proxy server is a server that obtains content-filled IP packets ON YOUR BEHALF, and then forwards them to you. Thus, a proxy server acts as a middleman who ensures that any website you visit does not have your real IP address, Google included. Before: (You) <====> (Google) | You get blocked if you query too much/too fast. Now: (You) <====> (proxy server) <====> (Google) | The proxy server gets blocked instead of you. A proxy server is remotely located, in places like Turkey, Russia, India, USA, UK etc. A problem with proxy servers is that each proxy server has only one IP, just like you do at home. So, if you manage to get that IP blocked by Google, you're back to square one. The solution to this problem is simple: just use lots of proxy servers! This is done most easily via a VPN, which is a software you run on your browser or your computer that has the configuration settings to connect to several proxy servers. VPNs are often paid services, but relatively cheap (like $5-10 a month). Here's a few articles about the best VPN services you can use: > https://www.bestvpn.com/blog/18736/5-best-free-vpn-2/ > http://www.pcmag.com/article2/0,2817,2390381,00.asp > https://www.bestvpn.com/blog/32729/5-best-vpns-for-windows-november-2015/ Most of the above were free with a data cap, or free for a certain time. 
As a poor, broke, LAZY college student, I didn't really feel like using any of them if it meant changing my setup, so I used VPNOneClick, which is (as of Jan 2016) free on Windows and Linux FOREVER with no data cap [the downside is that you do need to install an application from their site (www.vpnoneclick.com). This application is pretty annoying because it keeps crashing every once in a while and you have to manually restart it. But hey, free is free]. IMPORTANT: This application (googlesearch), uses your browser to get Google search results. If you have multiple browsers, you should preferably go for a VPN service that provides its own desktop application, since it's not guaranteed that this application will use a particular browser. [If you really want, you can hack the first few lines of perform_initial_google_search() in the file googlesearch.py, to force twill to use a particular user agent (check for your browser at http://whatsmyuseragent.com/)]. Now, once you've set up your VPN and connected to a new IP, you're good to go. You can check your new IP by manually Googling "what's my ip". """ results_per_page_note = """How to use -m (max number of results per page): As you know, a normal Google Search result returns 10 results per page. However, you can change this by adding "&num=<some number>" to the end of a search result. This is essentially what -m does. Specifying -m is a great way to get a LOT of results. For example suppose your query is kind of obscure and has 1542 results. You realize that ALL of the results are important to you. You could either wait for 155 pages of results to load, or you could set "-m 100" and get the same in 16 page results. The max value of -m is 100. However, setting -m to 100 is a quick way to get your IP noticed, since only developers and hackers do it. 
""" notes_dict = {4: lotsa_results_note, 3:results_per_page_note, 2: resume_note, 1: tp_note} for note_index in notes_dict: print " %s) %s\n\n\n\n"%(note_index, notes_dict[note_index])
ARDivekar/SearchDistribute
other/Legacy/cli_help.py
Python
mit
16,371
[ "VisIt" ]
cacff98321389ea9c10e40cc3691683231fbb6982a442bd11a0ad0b11a30077b
#!/usr/bin/python """ Copyright 2012 Paul Willworth <ioscode@gmail.com> This file is part of Galaxy Harvester. Galaxy Harvester is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Galaxy Harvester is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>. """ import os import sys import cgi import Cookie import dbSession import dbShared import MySQLdb import ghShared import ghLists from jinja2 import Environment, FileSystemLoader # Get current url try: url = os.environ['SCRIPT_NAME'] except KeyError: url = '' uiTheme = '' form = cgi.FieldStorage() # Get Cookies useCookies = 1 cookies = Cookie.SimpleCookie() try: cookies.load(os.environ['HTTP_COOKIE']) except KeyError: useCookies = 0 if useCookies: try: currentUser = cookies['userID'].value except KeyError: currentUser = '' try: loginResult = cookies['loginAttempt'].value except KeyError: loginResult = 'success' try: sid = cookies['gh_sid'].value except KeyError: sid = form.getfirst('gh_sid', '') try: uiTheme = cookies['uiTheme'].value except KeyError: uiTheme = '' else: currentUser = '' loginResult = form.getfirst('loginAttempt', '') sid = form.getfirst('gh_sid', '') # Get a session logged_state = 0 linkappend = '' disableStr = '' # escape input to prevent sql injection sid = dbShared.dbInsertSafe(sid) if loginResult == None: loginResult = 'success' sess = dbSession.getSession(sid, 2592000) if (sess != ''): logged_state = 1 currentUser = sess if (uiTheme == ''): uiTheme = dbShared.getUserAttr(currentUser, 'themeName') if 
(useCookies == 0): linkappend = 'gh_sid=' + sid else: disableStr = ' disabled=\'disabled\'' if (uiTheme == ''): uiTheme = 'crafter' pictureName = dbShared.getUserAttr(currentUser, 'pictureName') print 'Content-type: text/html\n' env = Environment(loader=FileSystemLoader('templates')) env.globals['BASE_SCRIPT_URL'] = ghShared.BASE_SCRIPT_URL template = env.get_template('alerts.html') print template.render(uiTheme=uiTheme, loggedin=logged_state, currentUser=currentUser, loginResult=loginResult, linkappend=linkappend, url=url, pictureName=pictureName, imgNum=ghShared.imgNum, planetList=ghLists.getPlanetList(), galaxyList=ghLists.getGalaxyList())
clreinki/GalaxyHarvester
myAlerts.py
Python
agpl-3.0
2,715
[ "Galaxy" ]
4008741e871a317bcba1be1f358f58abcc3b61dc4cb3f614a767961edd8288f2
#!/usr/bin/env python # Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from functools import reduce import numpy import scipy.linalg from pyscf import lib from pyscf import gto from pyscf.lib import logger from pyscf import lo from pyscf import __config__ BASE = getattr(__config__, 'BASE', 0) MAP_TOL = getattr(__config__, 'mo_mapping_mo_map_tol', 0.5) ORTH_METHOD = getattr(__config__, 'mo_mapping_mo_comps_orth_method', 'meta_lowdin') def mo_map(mol1, mo1, mol2, mo2, base=BASE, tol=.5): '''Given two orbitals, based on their overlap <i|j>, search all orbital-pairs which have significant overlap. Returns: Two lists. First list is the orbital-pair indices, second is the overlap value. ''' s = gto.intor_cross('int1e_ovlp', mol1, mol2) s = reduce(numpy.dot, (mo1.T, s, mo2)) idx = numpy.argwhere(abs(s) > tol) + base for i,j in idx: logger.info(mol1, '<mo-1|mo-2> %d %d %12.8f', i, j, s[i,j]) return idx, s def mo_1to1map(s): '''Given <i|j>, search for the 1-to-1 mapping between i and j. Returns: a list [j-close-to-i for i in <bra|] ''' s1 = abs(s) like_input = [] for i in range(s1.shape[0]): k = numpy.argmax(s1[i]) like_input.append(k) s1[:,k] = 0 return like_input def mo_comps(aolabels_or_baslst, mol, mo_coeff, cart=False, orth_method=ORTH_METHOD): '''Given AO(s), show how the AO(s) are distributed in MOs. 
Args: aolabels_or_baslst : filter function or AO labels or AO index If it's a function, the AO indices are the items for which the function return value is true. Kwargs: cart : bool whether the orbital coefficients are based on cartesian basis. orth_method : str The localization method to generated orthogonal AO upon which the AO contribution are computed. It can be one of 'meta_lowdin', 'lowdin' or 'nao'. Returns: A list of float to indicate the total contributions (normalized to 1) of localized AOs Examples: >>> from pyscf import gto, scf >>> from pyscf.tools import mo_mapping >>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='6-31g') >>> mf = scf.RHF(mol).run() >>> comp = mo_mapping.mo_comps('F 2s', mol, mf.mo_coeff) >>> print('MO-id F-2s components') >>> for i,c in enumerate(comp): ... print('%-3d %.10f' % (i, c)) MO-id components 0 0.0000066344 1 0.8796915532 2 0.0590259826 3 0.0000000000 4 0.0000000000 5 0.0435028851 6 0.0155889103 7 0.0000000000 8 0.0000000000 9 0.0000822361 10 0.0021017982 ''' with lib.temporary_env(mol, cart=cart): assert(mo_coeff.shape[0] == mol.nao) s = mol.intor_symmetric('int1e_ovlp') lao = lo.orth.orth_ao(mol, orth_method, s=s) idx = gto.mole._aolabels2baslst(mol, aolabels_or_baslst) if len(idx) == 0: logger.warn(mol, 'Required orbitals are not found') mo1 = reduce(numpy.dot, (lao[:,idx].T, s, mo_coeff)) s1 = numpy.einsum('ki,ki->i', mo1, mo1) return s1 del(BASE, MAP_TOL, ORTH_METHOD)
gkc1000/pyscf
pyscf/tools/mo_mapping.py
Python
apache-2.0
3,885
[ "PySCF" ]
b95fbf054dbb34b5ca6119e4d960a351b9e513e7b6ee9c918187b8e89dd3cf21
""" Computes putative binding pockets on protein. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals __author__ = "Bharath Ramsundar" __copyright__ = "Copyright 2017, Stanford University" __license__ = "MIT" import os import tempfile import numpy as np from rdkit import Chem from subprocess import call from scipy.spatial import ConvexHull from deepchem.feat import hydrogenate_and_compute_partial_charges from deepchem.feat.atomic_coordinates import AtomicCoordinates from deepchem.feat.binding_pocket_features import BindingPocketFeaturizer from deepchem.feat.fingerprints import CircularFingerprint from deepchem.models.sklearn_models import SklearnModel from deepchem.data.datasets import NumpyDataset from deepchem.utils import rdkit_util def extract_active_site(protein_file, ligand_file, cutoff=4): """Extracts a box for the active site.""" protein_coords = rdkit_util.load_molecule( protein_file, add_hydrogens=False)[0] ligand_coords = rdkit_util.load_molecule( ligand_file, add_hydrogens=True, calc_charges=True)[0] num_ligand_atoms = len(ligand_coords) num_protein_atoms = len(protein_coords) pocket_inds = [] pocket_atoms = set([]) for lig_atom_ind in range(num_ligand_atoms): lig_atom = ligand_coords[lig_atom_ind] for protein_atom_ind in range(num_protein_atoms): protein_atom = protein_coords[protein_atom_ind] if np.linalg.norm(lig_atom - protein_atom) < cutoff: if protein_atom_ind not in pocket_atoms: pocket_atoms = pocket_atoms.union(set([protein_atom_ind])) # Should be an array of size (n_pocket_atoms, 3) pocket_atoms = list(pocket_atoms) n_pocket_atoms = len(pocket_atoms) pocket_coords = np.zeros((n_pocket_atoms, 3)) for ind, pocket_ind in enumerate(pocket_atoms): pocket_coords[ind] = protein_coords[pocket_ind] x_min = int(np.floor(np.amin(pocket_coords[:, 0]))) x_max = int(np.ceil(np.amax(pocket_coords[:, 0]))) y_min = int(np.floor(np.amin(pocket_coords[:, 1]))) y_max = 
int(np.ceil(np.amax(pocket_coords[:, 1]))) z_min = int(np.floor(np.amin(pocket_coords[:, 2]))) z_max = int(np.ceil(np.amax(pocket_coords[:, 2]))) return (((x_min, x_max), (y_min, y_max), (z_min, z_max)), pocket_atoms, pocket_coords) def compute_overlap(mapping, box1, box2): """Computes overlap between the two boxes. Overlap is defined as % atoms of box1 in box2. Note that overlap is not a symmetric measurement. """ atom1 = set(mapping[box1]) atom2 = set(mapping[box2]) return len(atom1.intersection(atom2)) / float(len(atom1)) def get_all_boxes(coords, pad=5): """Get all pocket boxes for protein coords. We pad all boxes the prescribed number of angstroms. TODO(rbharath): It looks like this may perhaps be non-deterministic? """ hull = ConvexHull(coords) boxes = [] for triangle in hull.simplices: # coords[triangle, 0] gives the x-dimension of all triangle points # Take transpose to make sure rows correspond to atoms. points = np.array( [coords[triangle, 0], coords[triangle, 1], coords[triangle, 2]]).T # We voxelize so all grids have integral coordinates (convenience) x_min, x_max = np.amin(points[:, 0]), np.amax(points[:, 0]) x_min, x_max = int(np.floor(x_min)) - pad, int(np.ceil(x_max)) + pad y_min, y_max = np.amin(points[:, 1]), np.amax(points[:, 1]) y_min, y_max = int(np.floor(y_min)) - pad, int(np.ceil(y_max)) + pad z_min, z_max = np.amin(points[:, 2]), np.amax(points[:, 2]) z_min, z_max = int(np.floor(z_min)) - pad, int(np.ceil(z_max)) + pad boxes.append(((x_min, x_max), (y_min, y_max), (z_min, z_max))) return boxes def boxes_to_atoms(atom_coords, boxes): """Maps each box to a list of atoms in that box. TODO(rbharath): This does a num_atoms x num_boxes computations. Is there a reasonable heuristic we can use to speed this up? 
""" mapping = {} for box_ind, box in enumerate(boxes): box_atoms = [] (x_min, x_max), (y_min, y_max), (z_min, z_max) = box print("Handing box %d/%d" % (box_ind, len(boxes))) for atom_ind in range(len(atom_coords)): atom = atom_coords[atom_ind] x_cont = x_min <= atom[0] and atom[0] <= x_max y_cont = y_min <= atom[1] and atom[1] <= y_max z_cont = z_min <= atom[2] and atom[2] <= z_max if x_cont and y_cont and z_cont: box_atoms.append(atom_ind) mapping[box] = box_atoms return mapping def merge_boxes(box1, box2): """Merges two boxes.""" (x_min1, x_max1), (y_min1, y_max1), (z_min1, z_max1) = box1 (x_min2, x_max2), (y_min2, y_max2), (z_min2, z_max2) = box2 x_min = min(x_min1, x_min2) y_min = min(y_min1, y_min2) z_min = min(z_min1, z_min2) x_max = max(x_max1, x_max2) y_max = max(y_max1, y_max2) z_max = max(z_max1, z_max2) return ((x_min, x_max), (y_min, y_max), (z_min, z_max)) def merge_overlapping_boxes(mapping, boxes, threshold=.8): """Merge boxes which have an overlap greater than threshold. TODO(rbharath): This merge code is terribly inelegant. It's also quadratic in number of boxes. It feels like there ought to be an elegant divide and conquer approach here. Figure out later... """ num_boxes = len(boxes) outputs = [] for i in range(num_boxes): box = boxes[0] new_boxes = [] new_mapping = {} # If overlap of box with previously generated output boxes, return contained = False for output_box in outputs: # Carry forward mappings new_mapping[output_box] = mapping[output_box] if compute_overlap(mapping, box, output_box) == 1: contained = True if contained: continue # We know that box has at least one atom not in outputs unique_box = True for merge_box in boxes[1:]: overlap = compute_overlap(mapping, box, merge_box) if overlap < threshold: new_boxes.append(merge_box) new_mapping[merge_box] = mapping[merge_box] else: # Current box has been merged into box further down list. 
# No need to output current box unique_box = False merged = merge_boxes(box, merge_box) new_boxes.append(merged) new_mapping[merged] = list( set(mapping[box]).union(set(mapping[merge_box]))) if unique_box: outputs.append(box) new_mapping[box] = mapping[box] boxes = new_boxes mapping = new_mapping return outputs, mapping class BindingPocketFinder(object): """Abstract superclass for binding pocket detectors""" def find_pockets(self, protein_file, ligand_file): """Finds potential binding pockets in proteins.""" raise NotImplementedError class ConvexHullPocketFinder(BindingPocketFinder): """Implementation that uses convex hull of protein to find pockets. Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4112621/pdf/1472-6807-14-18.pdf """ def __init__(self, pad=5): self.pad = pad def find_all_pockets(self, protein_file): """Find list of binding pockets on protein.""" # protein_coords is (N, 3) tensor coords = rdkit_util.load_molecule(protein_file)[0] return get_all_boxes(coords, self.pad) def find_pockets(self, protein_file, ligand_file): """Find list of suitable binding pockets on protein.""" protein_coords = rdkit_util.load_molecule( protein_file, add_hydrogens=False, calc_charges=False)[0] ligand_coords = rdkit_util.load_molecule( ligand_file, add_hydrogens=False, calc_charges=False)[0] boxes = get_all_boxes(protein_coords, self.pad) mapping = boxes_to_atoms(protein_coords, boxes) pockets, pocket_atoms_map = merge_overlapping_boxes(mapping, boxes) pocket_coords = [] for pocket in pockets: atoms = pocket_atoms_map[pocket] coords = np.zeros((len(atoms), 3)) for ind, atom in enumerate(atoms): coords[ind] = protein_coords[atom] pocket_coords.append(coords) return pockets, pocket_atoms_map, pocket_coords class RFConvexHullPocketFinder(BindingPocketFinder): """Uses pre-trained RF model + ConvexHulPocketFinder to select pockets.""" def __init__(self, pad=5): self.pad = pad self.convex_finder = ConvexHullPocketFinder(pad) # Load binding pocket model self.base_dir = 
tempfile.mkdtemp() print("About to download trained model.") # TODO(rbharath): Shift refined to full once trained. call(( "wget -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/trained_models/pocket_random_refined_RF.tar.gz" ).split()) call(("tar -zxvf pocket_random_refined_RF.tar.gz").split()) call(("mv pocket_random_refined_RF %s" % (self.base_dir)).split()) self.model_dir = os.path.join(self.base_dir, "pocket_random_refined_RF") # Fit model on dataset self.model = SklearnModel(model_dir=self.model_dir) self.model.reload() # Create featurizers self.pocket_featurizer = BindingPocketFeaturizer() self.ligand_featurizer = CircularFingerprint(size=1024) def find_pockets(self, protein_file, ligand_file): """Compute features for a given complex TODO(rbharath): This has a log of code overlap with compute_binding_pocket_features in examples/binding_pockets/binding_pocket_datasets.py. Find way to refactor to avoid code duplication. """ # if not ligand_file.endswith(".sdf"): # raise ValueError("Only .sdf ligand files can be featurized.") # ligand_basename = os.path.basename(ligand_file).split(".")[0] # ligand_mol2 = os.path.join( # self.base_dir, ligand_basename + ".mol2") # # # Write mol2 file for ligand # obConversion = ob.OBConversion() # conv_out = obConversion.SetInAndOutFormats(str("sdf"), str("mol2")) # ob_mol = ob.OBMol() # obConversion.ReadFile(ob_mol, str(ligand_file)) # obConversion.WriteFile(ob_mol, str(ligand_mol2)) # # # Featurize ligand # mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False) # if mol is None: # return None, None # # Default for CircularFingerprint # n_ligand_features = 1024 # ligand_features = self.ligand_featurizer.featurize([mol]) # # # Featurize pocket # pockets, pocket_atoms_map, pocket_coords = self.convex_finder.find_pockets( # protein_file, ligand_file) # n_pockets = len(pockets) # n_pocket_features = BindingPocketFeaturizer.n_features # # features = np.zeros((n_pockets, n_pocket_features+n_ligand_features)) # 
pocket_features = self.pocket_featurizer.featurize( # protein_file, pockets, pocket_atoms_map, pocket_coords) # # Note broadcast operation # features[:, :n_pocket_features] = pocket_features # features[:, n_pocket_features:] = ligand_features # dataset = NumpyDataset(X=features) # pocket_preds = self.model.predict(dataset) # pocket_pred_proba = np.squeeze(self.model.predict_proba(dataset)) # # # Find pockets which are active # active_pockets = [] # active_pocket_atoms_map = {} # active_pocket_coords = [] # for pocket_ind in range(len(pockets)): # #################################################### DEBUG # # TODO(rbharath): For now, using a weak cutoff. Fix later. # #if pocket_preds[pocket_ind] == 1: # if pocket_pred_proba[pocket_ind][1] > .15: # #################################################### DEBUG # pocket = pockets[pocket_ind] # active_pockets.append(pocket) # active_pocket_atoms_map[pocket] = pocket_atoms_map[pocket] # active_pocket_coords.append(pocket_coords[pocket_ind]) # return active_pockets, active_pocket_atoms_map, active_pocket_coords # # TODO(LESWING) raise ValueError("Karl Implement")
joegomes/deepchem
deepchem/dock/binding_pocket.py
Python
mit
11,738
[ "RDKit" ]
c11246b2483e11913176173725fe3ed296350cf79346db55c52d391c0ff1db20
""" Acceptance tests for Studio related to the container page. The container page is used both for displaying units, and for displaying containers within units. """ from nose.plugins.attrib import attr from unittest import skip from common.test.acceptance.fixtures.course import XBlockFixtureDesc from common.test.acceptance.pages.studio.component_editor import ComponentEditorView, ComponentVisibilityEditorView from common.test.acceptance.pages.studio.container import ContainerPage from common.test.acceptance.pages.studio.html_component_editor import HtmlComponentEditorView from common.test.acceptance.pages.studio.utils import add_discussion, drag from common.test.acceptance.pages.lms.courseware import CoursewarePage from common.test.acceptance.pages.lms.staff_view import StaffPage from common.test.acceptance.tests.helpers import create_user_partition_json import datetime from bok_choy.promise import Promise, EmptyPromise from base_studio_test import ContainerBase from xmodule.partitions.partitions import Group class NestedVerticalTest(ContainerBase): def populate_course_fixture(self, course_fixture): """ Sets up a course structure with nested verticals. 
""" self.container_title = "" self.group_a = "Group A" self.group_b = "Group B" self.group_empty = "Group Empty" self.group_a_item_1 = "Group A Item 1" self.group_a_item_2 = "Group A Item 2" self.group_b_item_1 = "Group B Item 1" self.group_b_item_2 = "Group B Item 2" self.group_a_handle = 0 self.group_a_item_1_handle = 1 self.group_a_item_2_handle = 2 self.group_empty_handle = 3 self.group_b_handle = 4 self.group_b_item_1_handle = 5 self.group_b_item_2_handle = 6 self.group_a_item_1_action_index = 0 self.group_a_item_2_action_index = 1 self.duplicate_label = "Duplicate of '{0}'" self.discussion_label = "Discussion" course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('vertical', 'Test Container').add_children( XBlockFixtureDesc('vertical', 'Group A').add_children( XBlockFixtureDesc('html', self.group_a_item_1), XBlockFixtureDesc('html', self.group_a_item_2) ), XBlockFixtureDesc('vertical', 'Group Empty'), XBlockFixtureDesc('vertical', 'Group B').add_children( XBlockFixtureDesc('html', self.group_b_item_1), XBlockFixtureDesc('html', self.group_b_item_2) ) ) ) ) ) ) @skip("Flaky: 01/16/2015") @attr(shard=1) class DragAndDropTest(NestedVerticalTest): """ Tests of reordering within the container page. """ def drag_and_verify(self, source, target, expected_ordering): self.do_action_and_verify( lambda (container): drag(container, source, target, 40), expected_ordering ) def test_reorder_in_group(self): """ Drag Group A Item 2 before Group A Item 1. 
""" expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_2, self.group_a_item_1]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.drag_and_verify(self.group_a_item_2_handle, self.group_a_item_1_handle, expected_ordering) def test_drag_to_top(self): """ Drag Group A Item 1 to top level (outside of Group A). """ expected_ordering = [{self.container_title: [self.group_a_item_1, self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.drag_and_verify(self.group_a_item_1_handle, self.group_a_handle, expected_ordering) def test_drag_into_different_group(self): """ Drag Group B Item 1 into Group A (first element). """ expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_2]}, {self.group_empty: []}] self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering) def test_drag_group_into_group(self): """ Drag Group B into Group A (first element). """ expected_ordering = [{self.container_title: [self.group_a, self.group_empty]}, {self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering) def test_drag_after_addition(self): """ Add some components and then verify that drag and drop still works. 
""" group_a_menu = 0 def add_new_components_and_rearrange(container): # Add a video component to Group 1 add_discussion(container, group_a_menu) # Duplicate the first item in Group A container.duplicate(self.group_a_item_1_action_index) first_handle = self.group_a_item_1_handle # Drag newly added video component to top. drag(container, first_handle + 3, first_handle, 40) # Drag duplicated component to top. drag(container, first_handle + 2, first_handle, 40) duplicate_label = self.duplicate_label.format(self.group_a_item_1) expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [duplicate_label, self.discussion_label, self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.do_action_and_verify(add_new_components_and_rearrange, expected_ordering) @attr(shard=1) class AddComponentTest(NestedVerticalTest): """ Tests of adding a component to the container page. """ def add_and_verify(self, menu_index, expected_ordering): self.do_action_and_verify( lambda (container): add_discussion(container, menu_index), expected_ordering ) def test_add_component_in_group(self): group_b_menu = 2 expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2, self.discussion_label]}, {self.group_empty: []}] self.add_and_verify(group_b_menu, expected_ordering) def test_add_component_in_empty_group(self): group_empty_menu = 1 expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: [self.discussion_label]}] self.add_and_verify(group_empty_menu, expected_ordering) def test_add_component_in_container(self): container_menu = 3 expected_ordering = 
[{self.container_title: [self.group_a, self.group_empty, self.group_b, self.discussion_label]}, {self.group_a: [self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.add_and_verify(container_menu, expected_ordering) @attr(shard=1) class DuplicateComponentTest(NestedVerticalTest): """ Tests of duplicating a component on the container page. """ def duplicate_and_verify(self, source_index, expected_ordering): self.do_action_and_verify( lambda (container): container.duplicate(source_index), expected_ordering ) def test_duplicate_first_in_group(self): duplicate_label = self.duplicate_label.format(self.group_a_item_1) expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, duplicate_label, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.duplicate_and_verify(self.group_a_item_1_action_index, expected_ordering) def test_duplicate_second_in_group(self): duplicate_label = self.duplicate_label.format(self.group_a_item_2) expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, self.group_a_item_2, duplicate_label]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.duplicate_and_verify(self.group_a_item_2_action_index, expected_ordering) def test_duplicate_the_duplicate(self): first_duplicate_label = self.duplicate_label.format(self.group_a_item_1) second_duplicate_label = self.duplicate_label.format(first_duplicate_label) expected_ordering = [ {self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, first_duplicate_label, second_duplicate_label, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []} ] def duplicate_twice(container): 
container.duplicate(self.group_a_item_1_action_index) container.duplicate(self.group_a_item_1_action_index + 1) self.do_action_and_verify(duplicate_twice, expected_ordering) @attr(shard=1) class DeleteComponentTest(NestedVerticalTest): """ Tests of deleting a component from the container page. """ def delete_and_verify(self, source_index, expected_ordering): self.do_action_and_verify( lambda (container): container.delete(source_index), expected_ordering ) def test_delete_first_in_group(self): expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] # Group A itself has a delete icon now, so item_1 is index 1 instead of 0. group_a_item_1_delete_index = 1 self.delete_and_verify(group_a_item_1_delete_index, expected_ordering) @attr(shard=1) class EditContainerTest(NestedVerticalTest): """ Tests of editing a container. """ def modify_display_name_and_verify(self, component): """ Helper method for changing a display name. """ modified_name = 'modified' self.assertNotEqual(component.name, modified_name) component.edit() component_editor = ComponentEditorView(self.browser, component.locator) component_editor.set_field_value_and_save('Display Name', modified_name) self.assertEqual(component.name, modified_name) def test_edit_container_on_unit_page(self): """ Test the "edit" button on a container appearing on the unit page. """ unit = self.go_to_unit_page() component = unit.xblocks[1] self.modify_display_name_and_verify(component) def test_edit_container_on_container_page(self): """ Test the "edit" button on a container appearing on the container page. """ container = self.go_to_nested_container_page() self.modify_display_name_and_verify(container) def test_edit_raw_html(self): """ Test the raw html editing functionality. 
""" modified_content = "<p>modified content</p>" #navigate to and open the component for editing unit = self.go_to_unit_page() container = unit.xblocks[1].go_to_container() component = container.xblocks[1].children[0] component.edit() html_editor = HtmlComponentEditorView(self.browser, component.locator) html_editor.set_content_and_save(modified_content, raw=True) #note we're expecting the <p> tags to have been removed self.assertEqual(component.student_content, "modified content") @attr(shard=3) class EditVisibilityModalTest(ContainerBase): """ Tests of the visibility settings modal for components on the unit page. """ VISIBILITY_LABEL_ALL = 'All Students and Staff' VISIBILITY_LABEL_SPECIFIC = 'Specific Content Groups' MISSING_GROUP_LABEL = 'Deleted Content Group\nContent group no longer exists. Please choose another or allow access to All Students and staff' VALIDATION_ERROR_LABEL = 'This component has validation issues.' VALIDATION_ERROR_MESSAGE = 'Error:\nThis component refers to deleted or invalid content groups.' GROUP_VISIBILITY_MESSAGE = 'Some content in this unit is visible only to particular content groups' def setUp(self): super(EditVisibilityModalTest, self).setUp() # Set up a cohort-schemed user partition self.course_fixture._update_xblock(self.course_fixture._course_location, { "metadata": { u"user_partitions": [ create_user_partition_json( 0, 'Configuration Dogs, Cats', 'Content Group Partition', [Group("0", 'Dogs'), Group("1", 'Cats')], scheme="cohort" ) ], }, }) self.container_page = self.go_to_unit_page() self.html_component = self.container_page.xblocks[1] def populate_course_fixture(self, course_fixture): """ Populate a simple course a section, subsection, and unit, and HTML component. 
""" course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('html', 'Html Component') ) ) ) ) def edit_component_visibility(self, component): """ Edit the visibility of an xblock on the container page. """ component.edit_visibility() return ComponentVisibilityEditorView(self.browser, component.locator) def verify_selected_labels(self, visibility_editor, expected_labels): """ Verify that a visibility editor's selected labels match the expected ones. """ # If anything other than 'All Students and Staff', is selected, # 'Specific Content Groups' should be selected as well. if expected_labels != [self.VISIBILITY_LABEL_ALL]: expected_labels.append(self.VISIBILITY_LABEL_SPECIFIC) self.assertItemsEqual(expected_labels, [option.text for option in visibility_editor.selected_options]) def select_and_verify_saved(self, component, labels, expected_labels=None): """ Edit the visibility of an xblock on the container page and verify that the edit persists. If provided, verify that `expected_labels` are selected after save, otherwise expect that `labels` are selected after save. Note that `labels` are labels which should be clicked, but not necessarily checked. """ if expected_labels is None: expected_labels = labels # Make initial edit(s) and save visibility_editor = self.edit_component_visibility(component) for label in labels: visibility_editor.select_option(label, save=False) visibility_editor.save() # Re-open the modal and inspect its selected inputs visibility_editor = self.edit_component_visibility(component) self.verify_selected_labels(visibility_editor, expected_labels) visibility_editor.save() def verify_component_validation_error(self, component): """ Verify that we see validation errors for the given component. 
""" self.assertTrue(component.has_validation_error) self.assertEqual(component.validation_error_text, self.VALIDATION_ERROR_LABEL) self.assertEqual([self.VALIDATION_ERROR_MESSAGE], component.validation_error_messages) def verify_visibility_set(self, component, is_set): """ Verify that the container page shows that component visibility settings have been edited if `is_set` is True; otherwise verify that the container page shows no such information. """ if is_set: self.assertIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message) self.assertTrue(component.has_group_visibility_set) else: self.assertNotIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message) self.assertFalse(component.has_group_visibility_set) def update_component(self, component, metadata): """ Update a component's metadata and refresh the page. """ self.course_fixture._update_xblock(component.locator, {'metadata': metadata}) self.browser.refresh() self.container_page.wait_for_page() def remove_missing_groups(self, visibility_editor, component): """ Deselect the missing groups for a component. After save, verify that there are no missing group messages in the modal and that there is no validation error on the component. """ for option in visibility_editor.selected_options: if option.text == self.MISSING_GROUP_LABEL: option.click() visibility_editor.save() visibility_editor = self.edit_component_visibility(component) self.assertNotIn(self.MISSING_GROUP_LABEL, [item.text for item in visibility_editor.all_options]) visibility_editor.cancel() self.assertFalse(component.has_validation_error) def test_default_selection(self): """ Scenario: The component visibility modal selects visible to all by default. 
Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component Then the default visibility selection should be 'All Students and Staff' And the container page should not display the content visibility warning """ self.verify_selected_labels(self.edit_component_visibility(self.html_component), [self.VISIBILITY_LABEL_ALL]) self.verify_visibility_set(self.html_component, False) def test_reset_to_all_students_and_staff(self): """ Scenario: The component visibility modal can be set to be visible to all students and staff. Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component And I select 'Dogs' And I save the modal Then the container page should display the content visibility warning And I re-open the visibility editor modal for that unit's component And I select 'All Students and Staff' And I save the modal Then the visibility selection should be 'All Students and Staff' And the container page should not display the content visibility warning """ self.select_and_verify_saved(self.html_component, ['Dogs']) self.verify_visibility_set(self.html_component, True) self.select_and_verify_saved(self.html_component, [self.VISIBILITY_LABEL_ALL]) self.verify_visibility_set(self.html_component, False) def test_select_single_content_group(self): """ Scenario: The component visibility modal can be set to be visible to one content group. 
Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component And I select 'Dogs' And I save the modal Then the visibility selection should be 'Dogs' and 'Specific Content Groups' And the container page should display the content visibility warning """ self.select_and_verify_saved(self.html_component, ['Dogs']) self.verify_visibility_set(self.html_component, True) def test_select_multiple_content_groups(self): """ Scenario: The component visibility modal can be set to be visible to multiple content groups. Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component And I select 'Dogs' and 'Cats' And I save the modal Then the visibility selection should be 'Dogs', 'Cats', and 'Specific Content Groups' And the container page should display the content visibility warning """ self.select_and_verify_saved(self.html_component, ['Dogs', 'Cats']) self.verify_visibility_set(self.html_component, True) def test_select_zero_content_groups(self): """ Scenario: The component visibility modal can not be set to be visible to 'Specific Content Groups' without selecting those specific groups. Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component And I select 'Specific Content Groups' And I save the modal Then the visibility selection should be 'All Students and Staff' And the container page should not display the content visibility warning """ self.select_and_verify_saved( self.html_component, [self.VISIBILITY_LABEL_SPECIFIC], expected_labels=[self.VISIBILITY_LABEL_ALL] ) self.verify_visibility_set(self.html_component, False) def test_missing_groups(self): """ Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown group ids. 
Given I have a unit with one component And that component's group access specifies multiple invalid group ids When I go to the container page for that unit Then I should see a validation error message on that unit's component And I open the visibility editor modal for that unit's component Then I should see that I have selected multiple deleted groups And the container page should display the content visibility warning And I de-select the missing groups And I save the modal Then the visibility selection should be 'All Students and Staff' And I should not see any validation errors on the component And the container page should not display the content visibility warning """ self.update_component(self.html_component, {'group_access': {0: [2, 3]}}) self.verify_component_validation_error(self.html_component) visibility_editor = self.edit_component_visibility(self.html_component) self.verify_selected_labels(visibility_editor, [self.MISSING_GROUP_LABEL] * 2) self.remove_missing_groups(visibility_editor, self.html_component) self.verify_visibility_set(self.html_component, False) def test_found_and_missing_groups(self): """ Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown group ids and multiple known group ids. Given I have a unit with one component And that component's group access specifies multiple invalid and valid group ids When I go to the container page for that unit Then I should see a validation error message on that unit's component And I open the visibility editor modal for that unit's component Then I should see that I have selected multiple deleted groups And the container page should display the content visibility warning And I de-select the missing groups And I save the modal Then the visibility selection should be the names of the valid groups. 
And I should not see any validation errors on the component And the container page should display the content visibility warning """ self.update_component(self.html_component, {'group_access': {0: [0, 1, 2, 3]}}) self.verify_component_validation_error(self.html_component) visibility_editor = self.edit_component_visibility(self.html_component) self.verify_selected_labels(visibility_editor, ['Dogs', 'Cats'] + [self.MISSING_GROUP_LABEL] * 2) self.remove_missing_groups(visibility_editor, self.html_component) visibility_editor = self.edit_component_visibility(self.html_component) self.verify_selected_labels(visibility_editor, ['Dogs', 'Cats']) self.verify_visibility_set(self.html_component, True) @attr(shard=1) class UnitPublishingTest(ContainerBase): """ Tests of the publishing control and related widgets on the Unit page. """ PUBLISHED_STATUS = "Publishing Status\nPublished (not yet released)" PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live" DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)" LOCKED_STATUS = "Publishing Status\nVisible to Staff Only" RELEASE_TITLE_RELEASED = "RELEASED:" RELEASE_TITLE_RELEASE = "RELEASE:" LAST_PUBLISHED = 'Last published' LAST_SAVED = 'Draft saved on' def populate_course_fixture(self, course_fixture): """ Sets up a course structure with a unit and a single HTML child. 
""" self.html_content = '<p><strong>Body of HTML Unit.</strong></p>' self.courseware = CoursewarePage(self.browser, self.course_id) past_start_date = datetime.datetime(1974, 6, 22) self.past_start_date_text = "Jun 22, 1974 at 00:00 UTC" future_start_date = datetime.datetime(2100, 9, 13) course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('html', 'Test html', data=self.html_content) ) ) ), XBlockFixtureDesc( 'chapter', 'Unlocked Section', metadata={'start': past_start_date.isoformat()} ).add_children( XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children( XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children( XBlockFixtureDesc('problem', '<problem></problem>', data=self.html_content) ) ) ), XBlockFixtureDesc('chapter', 'Section With Locked Unit').add_children( XBlockFixtureDesc( 'sequential', 'Subsection With Locked Unit', metadata={'start': past_start_date.isoformat()} ).add_children( XBlockFixtureDesc( 'vertical', 'Locked Unit', metadata={'visible_to_staff_only': True} ).add_children( XBlockFixtureDesc('discussion', '', data=self.html_content) ) ) ), XBlockFixtureDesc( 'chapter', 'Unreleased Section', metadata={'start': future_start_date.isoformat()} ).add_children( XBlockFixtureDesc('sequential', 'Unreleased Subsection').add_children( XBlockFixtureDesc('vertical', 'Unreleased Unit') ) ) ) def test_publishing(self): """ Scenario: The publish title changes based on whether or not draft content exists Given I have a published unit with no unpublished changes When I go to the unit page in Studio Then the title in the Publish information box is "Published and Live" And the Publish button is disabled And the last published text contains "Last published" And the last saved text contains "Last published" And when I add a component to the unit Then the title in the Publish 
information box is "Draft (Unpublished changes)" And the last saved text contains "Draft saved on" And the Publish button is enabled And when I click the Publish button Then the title in the Publish information box is "Published and Live" And the last published text contains "Last published" And the last saved text contains "Last published" """ unit = self.go_to_unit_page() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) # Start date set in course fixture to 1970. self._verify_release_date_info( unit, self.RELEASE_TITLE_RELEASED, 'Jan 01, 1970 at 00:00 UTC\nwith Section "Test Section"' ) self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED) # Should not be able to click on Publish action -- but I don't know how to test that it is not clickable. # TODO: continue discussion with Muhammad and Jay about this. # Add a component to the page so it will have unpublished changes. add_discussion(unit) self._verify_publish_title(unit, self.DRAFT_STATUS) self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_SAVED) unit.publish_action.click() unit.wait_for_ajax() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED) def test_discard_changes(self): """ Scenario: The publish title changes after "Discard Changes" is clicked Given I have a published unit with no unpublished changes When I go to the unit page in Studio Then the Discard Changes button is disabled And I add a component to the unit Then the title in the Publish information box is "Draft (Unpublished changes)" And the Discard Changes button is enabled And when I click the Discard Changes button Then the title in the Publish information box is "Published and Live" """ unit = self.go_to_unit_page() add_discussion(unit) self._verify_publish_title(unit, self.DRAFT_STATUS) unit.discard_changes() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) def 
test_view_live_no_changes(self): """ Scenario: "View Live" shows published content in LMS Given I have a published unit with no unpublished changes When I go to the unit page in Studio Then the View Live button is enabled And when I click on the View Live button Then I see the published content in LMS """ unit = self.go_to_unit_page() self._view_published_version(unit) self._verify_components_visible(['html']) def test_view_live_changes(self): """ Scenario: "View Live" does not show draft content in LMS Given I have a published unit with no unpublished changes When I go to the unit page in Studio And when I add a component to the unit And when I click on the View Live button Then I see the published content in LMS And I do not see the unpublished component """ unit = self.go_to_unit_page() add_discussion(unit) self._view_published_version(unit) self._verify_components_visible(['html']) self.assertEqual(self.html_content, self.courseware.xblock_component_html_content(0)) def test_view_live_after_publish(self): """ Scenario: "View Live" shows newly published content Given I have a published unit with no unpublished changes When I go to the unit page in Studio And when I add a component to the unit And when I click the Publish button And when I click on the View Live button Then I see the newly published component """ unit = self.go_to_unit_page() add_discussion(unit) unit.publish_action.click() self._view_published_version(unit) self._verify_components_visible(['html', 'discussion']) def test_initially_unlocked_visible_to_students(self): """ Scenario: An unlocked unit with release date in the past is visible to students Given I have a published unlocked unit with release date in the past When I go to the unit page in Studio Then the unit has a warning that it is visible to students And it is marked as "RELEASED" with release date in the past visible And when I click on the View Live Button And when I view the course as a student Then I see the content in the unit """ 
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit") self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self.assertTrue(unit.currently_visible_to_students) self._verify_release_date_info( unit, self.RELEASE_TITLE_RELEASED, self.past_start_date_text + '\n' + 'with Section "Unlocked Section"' ) self._view_published_version(unit) self._verify_student_view_visible(['problem']) def test_locked_visible_to_staff_only(self): """ Scenario: After locking a unit with release date in the past, it is only visible to staff Given I have a published unlocked unit with release date in the past When I go to the unit page in Studio And when I select "Hide from students" Then the unit does not have a warning that it is visible to students And the unit does not display inherited staff lock And when I click on the View Live Button Then I see the content in the unit when logged in as staff And when I view the course as a student Then I do not see any content in the unit """ unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit") checked = unit.toggle_staff_lock() self.assertTrue(checked) self.assertFalse(unit.currently_visible_to_students) self.assertFalse(unit.shows_inherited_staff_lock()) self._verify_publish_title(unit, self.LOCKED_STATUS) self._view_published_version(unit) # Will initially be in staff view, locked component should be visible. 
self._verify_components_visible(['problem']) # Switch to student view and verify not visible self._verify_student_view_locked() def test_initially_locked_not_visible_to_students(self): """ Scenario: A locked unit with release date in the past is not visible to students Given I have a published locked unit with release date in the past When I go to the unit page in Studio Then the unit does not have a warning that it is visible to students And it is marked as "RELEASE" with release date in the past visible And when I click on the View Live Button And when I view the course as a student Then I do not see any content in the unit """ unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit") self._verify_publish_title(unit, self.LOCKED_STATUS) self.assertFalse(unit.currently_visible_to_students) self._verify_release_date_info( unit, self.RELEASE_TITLE_RELEASE, self.past_start_date_text + '\n' + 'with Subsection "Subsection With Locked Unit"' ) self._view_published_version(unit) self._verify_student_view_locked() def test_unlocked_visible_to_all(self): """ Scenario: After unlocking a unit with release date in the past, it is visible to both students and staff Given I have a published unlocked unit with release date in the past When I go to the unit page in Studio And when I deselect "Hide from students" Then the unit does have a warning that it is visible to students And when I click on the View Live Button Then I see the content in the unit when logged in as staff And when I view the course as a student Then I see the content in the unit """ unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit") checked = unit.toggle_staff_lock() self.assertFalse(checked) self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self.assertTrue(unit.currently_visible_to_students) self._view_published_version(unit) # Will initially be in staff view, components always visible. 
self._verify_components_visible(['discussion']) # Switch to student view and verify visible. self._verify_student_view_visible(['discussion']) def test_explicit_lock_overrides_implicit_subsection_lock_information(self): """ Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information Given I have a course with sections, subsections, and units And I have enabled explicit staff lock on a subsection When I visit the unit page Then the unit page shows its inherited staff lock And I enable explicit staff locking Then the unit page does not show its inherited staff lock And when I disable explicit staff locking Then the unit page now shows its inherited staff lock """ self.outline.visit() self.outline.expand_all_subsections() subsection = self.outline.section_at(0).subsection_at(0) unit = subsection.unit_at(0) subsection.set_staff_lock(True) unit_page = unit.go_to() self._verify_explicit_lock_overrides_implicit_lock_information(unit_page) def test_explicit_lock_overrides_implicit_section_lock_information(self): """ Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information Given I have a course with sections, subsections, and units And I have enabled explicit staff lock on a section When I visit the unit page Then the unit page shows its inherited staff lock And I enable explicit staff locking Then the unit page does not show its inherited staff lock And when I disable explicit staff locking Then the unit page now shows its inherited staff lock """ self.outline.visit() self.outline.expand_all_subsections() section = self.outline.section_at(0) unit = section.subsection_at(0).unit_at(0) section.set_staff_lock(True) unit_page = unit.go_to() self._verify_explicit_lock_overrides_implicit_lock_information(unit_page) def test_published_unit_with_draft_child(self): """ Scenario: A published unit with a draft child can be published Given I have a published unit with no unpublished changes When I go to the unit page 
in Studio And edit the content of the only component Then the content changes And the title in the Publish information box is "Draft (Unpublished changes)" And when I click the Publish button Then the title in the Publish information box is "Published and Live" And when I click the View Live button Then I see the changed content in LMS """ modified_content = 'modified content' unit = self.go_to_unit_page() component = unit.xblocks[1] component.edit() HtmlComponentEditorView(self.browser, component.locator).set_content_and_save(modified_content) self.assertEqual(component.student_content, modified_content) self._verify_publish_title(unit, self.DRAFT_STATUS) unit.publish_action.click() unit.wait_for_ajax() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self._view_published_version(unit) self.assertIn(modified_content, self.courseware.xblock_component_html_content(0)) def test_cancel_does_not_create_draft(self): """ Scenario: Editing a component and then canceling does not create a draft version (TNL-399) Given I have a published unit with no unpublished changes When I go to the unit page in Studio And edit the content of an HTML component and then press cancel Then the content does not change And the title in the Publish information box is "Published and Live" And when I reload the page Then the title in the Publish information box is "Published and Live" """ unit = self.go_to_unit_page() component = unit.xblocks[1] component.edit() HtmlComponentEditorView(self.browser, component.locator).set_content_and_cancel("modified content") self.assertEqual(component.student_content, "Body of HTML Unit.") self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self.browser.refresh() unit.wait_for_page() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) def test_delete_child_in_published_unit(self): """ Scenario: A published unit can be published again after deleting a child Given I have a published unit with no unpublished changes When I go to the 
unit page in Studio And delete the only component Then the title in the Publish information box is "Draft (Unpublished changes)" And when I click the Publish button Then the title in the Publish information box is "Published and Live" And when I click the View Live button Then I see an empty unit in LMS """ unit = self.go_to_unit_page() unit.delete(0) self._verify_publish_title(unit, self.DRAFT_STATUS) unit.publish_action.click() unit.wait_for_ajax() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self._view_published_version(unit) self.assertEqual(0, self.courseware.num_xblock_components) def test_published_not_live(self): """ Scenario: The publish title displays correctly for units that are not live Given I have a published unit with no unpublished changes that releases in the future When I go to the unit page in Studio Then the title in the Publish information box is "Published (not yet released)" And when I add a component to the unit Then the title in the Publish information box is "Draft (Unpublished changes)" And when I click the Publish button Then the title in the Publish information box is "Published (not yet released)" """ unit = self.go_to_unit_page('Unreleased Section', 'Unreleased Subsection', 'Unreleased Unit') self._verify_publish_title(unit, self.PUBLISHED_STATUS) add_discussion(unit) self._verify_publish_title(unit, self.DRAFT_STATUS) unit.publish_action.click() unit.wait_for_ajax() self._verify_publish_title(unit, self.PUBLISHED_STATUS) def _view_published_version(self, unit): """ Goes to the published version, then waits for the browser to load the page. """ unit.view_published_version() self.assertEqual(len(self.browser.window_handles), 2) self.courseware.wait_for_page() def _verify_and_return_staff_page(self): """ Verifies that the browser is on the staff page and returns a StaffPage. 
""" page = StaffPage(self.browser, self.course_id) EmptyPromise(page.is_browser_on_page, 'Browser is on staff page in LMS').fulfill() return page def _verify_student_view_locked(self): """ Verifies no component is visible when viewing as a student. """ self._verify_and_return_staff_page().set_staff_view_mode('Student') self.assertEqual(0, self.courseware.num_xblock_components) def _verify_student_view_visible(self, expected_components): """ Verifies expected components are visible when viewing as a student. """ self._verify_and_return_staff_page().set_staff_view_mode('Student') self._verify_components_visible(expected_components) def _verify_components_visible(self, expected_components): """ Verifies the expected components are visible (and there are no extras). """ self.assertEqual(len(expected_components), self.courseware.num_xblock_components) for index, component in enumerate(expected_components): self.assertEqual(component, self.courseware.xblock_component_type(index)) def _verify_release_date_info(self, unit, expected_title, expected_date): """ Verifies how the release date is displayed in the publishing sidebar. """ self.assertEqual(expected_title, unit.release_title) self.assertEqual(expected_date, unit.release_date) def _verify_publish_title(self, unit, expected_title): """ Waits for the publish title to change to the expected value. """ def wait_for_title_change(): return (unit.publish_title == expected_title, unit.publish_title) Promise(wait_for_title_change, "Publish title incorrect. Found '" + unit.publish_title + "'").fulfill() def _verify_last_published_and_saved(self, unit, expected_published_prefix, expected_saved_prefix): """ Verifies that last published and last saved messages respectively contain the given strings. 
""" self.assertIn(expected_published_prefix, unit.last_published_text) self.assertIn(expected_saved_prefix, unit.last_saved_text) def _verify_explicit_lock_overrides_implicit_lock_information(self, unit_page): """ Verifies that a unit with inherited staff lock does not display inherited information when explicitly locked. """ self.assertTrue(unit_page.shows_inherited_staff_lock()) unit_page.toggle_staff_lock(inherits_staff_lock=True) self.assertFalse(unit_page.shows_inherited_staff_lock()) unit_page.toggle_staff_lock(inherits_staff_lock=True) self.assertTrue(unit_page.shows_inherited_staff_lock()) # TODO: need to work with Jay/Christine to get testing of "Preview" working. # def test_preview(self): # unit = self.go_to_unit_page() # add_discussion(unit) # unit.preview() # self.assertEqual(2, self.courseware.num_xblock_components) # self.assertEqual('html', self.courseware.xblock_component_type(0)) # self.assertEqual('discussion', self.courseware.xblock_component_type(1)) @attr(shard=3) class DisplayNameTest(ContainerBase): """ Test consistent use of display_name_with_default """ def populate_course_fixture(self, course_fixture): """ Sets up a course structure with nested verticals. """ course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('vertical', None) ) ) ) ) def test_display_name_default(self): """ Scenario: Given that an XBlock with a dynamic display name has been added to the course, When I view the unit page and note the display name of the block, Then I see the dynamically generated display name, And when I then go to the container page for that same block, Then I see the same generated display name. 
""" # Unfortunately no blocks in the core platform implement display_name_with_default # in an interesting way for this test, so we are just testing for consistency and not # the actual value. unit = self.go_to_unit_page() test_block = unit.xblocks[1] title_on_unit_page = test_block.name container = test_block.go_to_container() self.assertEqual(container.name, title_on_unit_page) @attr(shard=3) class ProblemCategoryTabsTest(ContainerBase): """ Test to verify tabs in problem category. """ def setUp(self, is_staff=True): super(ProblemCategoryTabsTest, self).setUp(is_staff=is_staff) def populate_course_fixture(self, course_fixture): """ Sets up course structure. """ course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit') ) ) ) def test_correct_tabs_present(self): """ Scenario: Verify that correct tabs are present in problem category. Given I am a staff user When I go to unit page Then I only see `Common Problem Types` and `Advanced` tabs in `problem` category """ self.go_to_unit_page() page = ContainerPage(self.browser, None) self.assertEqual(page.get_category_tab_names('problem'), ['Common Problem Types', 'Advanced']) def test_common_problem_types_tab(self): """ Scenario: Verify that correct components are present in Common Problem Types tab. 
Given I am a staff user When I go to unit page Then I see correct components under `Common Problem Types` tab in `problem` category """ self.go_to_unit_page() page = ContainerPage(self.browser, None) expected_components = [ "Blank Common Problem", "Checkboxes", "Dropdown", "Multiple Choice", "Numerical Input", "Text Input", "Checkboxes with Hints and Feedback", "Dropdown with Hints and Feedback", "Multiple Choice with Hints and Feedback", "Numerical Input with Hints and Feedback", "Text Input with Hints and Feedback", ] self.assertEqual(page.get_category_tab_components('problem', 1), expected_components)
louyihua/edx-platform
common/test/acceptance/tests/studio/test_studio_container.py
Python
agpl-3.0
53,390
[ "VisIt" ]
0ad48ac85c205d38a05c2014f2bfd945f68423229e34541680f9e18aa49294c5
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # # The dataset read by this exercise ("combVectors.vtk") has field data # associated with the pointdata, namely two vector fields. In this exercise, # you will convert both sets of field data into attribute data. Mappers only # process attribute data, not field data. So we must convert the field data to # attribute data in order to display it. (You'll need to determine the "names" # of the two vector fields in the field data.) # # If there is time remaining, you might consider adding a programmable filter # to convert the two sets of vectors into a single scalar field, representing # the angle between the two vector fields. # # You will most likely use vtkFieldDataToAttributeDataFilter, vtkHedgeHog, # and vtkProgrammableAttributeDataFilter. # # Create the RenderWindow, Renderer and interactor # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.SetMultiSamples(0) renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create pipeline # # get the pressure gradient vector field pl3d_gradient = vtk.vtkMultiBlockPLOT3DReader() pl3d_gradient.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin") pl3d_gradient.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin") pl3d_gradient.SetScalarFunctionNumber(100) pl3d_gradient.SetVectorFunctionNumber(210) pl3d_gradient.Update() pl3d_g_output = pl3d_gradient.GetOutput().GetBlock(0) # get the velocity vector field pl3d_velocity = vtk.vtkMultiBlockPLOT3DReader() pl3d_velocity.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin") pl3d_velocity.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin") pl3d_velocity.SetScalarFunctionNumber(100) pl3d_velocity.SetVectorFunctionNumber(200) pl3d_velocity.Update() pl3d_v_output = pl3d_velocity.GetOutput().GetBlock(0) # contour the scalar fields contour = vtk.vtkContourFilter() 
contour.SetInputData(pl3d_g_output) contour.SetValue(0,0.225) # probe the vector fields to get data at the contour surface probe_gradient = vtk.vtkProbeFilter() probe_gradient.SetInputConnection(contour.GetOutputPort()) probe_gradient.SetSourceData(pl3d_g_output) probe_velocity = vtk.vtkProbeFilter() probe_velocity.SetInputConnection(contour.GetOutputPort()) probe_velocity.SetSourceData(pl3d_v_output) # # To display the vector fields, we use vtkHedgeHog to create lines. # velocity = vtk.vtkHedgeHog() velocity.SetInputConnection(probe_velocity.GetOutputPort()) velocity.SetScaleFactor(0.0015) pressureGradient = vtk.vtkHedgeHog() pressureGradient.SetInputConnection(probe_gradient.GetOutputPort()) pressureGradient.SetScaleFactor(0.00002) def ExecuteDot (__vtk__temp0=0,__vtk__temp1=0): # proc for ProgrammableAttributeDataFilter. Note the use of "double()" # in the calculations. This protects us from Tcl using ints and # overflowing. inputs = dotProduct.GetInputList() input0 = inputs.GetDataSet(0) input1 = inputs.GetDataSet(1) numPts = input0.GetNumberOfPoints() vectors0 = input0.GetPointData().GetVectors() vectors1 = input1.GetPointData().GetVectors() scalars = vtk.vtkFloatArray() i = 0 while i < numPts: v0 = vectors0.GetTuple3(i) v1 = vectors1.GetTuple3(i) v0x = lindex(v0,0) v0y = lindex(v0,1) v0z = lindex(v0,2) v1x = lindex(v1,0) v1y = lindex(v1,1) v1z = lindex(v1,2) l0 = expr.expr(globals(), locals(),["double","(","v0x",")*","double","(","v0x",")","+","double","(","v0y",")*","double","(","v0y",")","+","double","(","v0z",")*","double","(","v0z",")"]) l1 = expr.expr(globals(), locals(),["double","(","v1x",")*","double","(","v1x",")","+","double","(","v1y",")*","double","(","v1y",")","+","double","(","v1z",")*","double","(","v1z",")"]) l0 = expr.expr(globals(), locals(),["sqrt","(","double","(","l0","))"]) l1 = expr.expr(globals(), locals(),["sqrt","(","double","(","l1","))"]) if (l0 > 0.0 and l1 > 0.0): d = expr.expr(globals(), 
locals(),["(","double","(","v0x",")*","double","(","v1x",")","+","double","(","v0y",")*","double","(","v1y",")","+","double","(","v0z",")*","double","(","v1z","))/(","l0","*","l1",")"]) pass else: d = 0.0 pass scalars.InsertValue(i,d) i = i + 1 dotProduct.GetOutput().GetPointData().SetScalars(scalars) del scalars # # We use the ProgrammableAttributeDataFilter to compute the cosine # of the angle between the two vector fields (i.e. the dot product # normalized by the product of the vector lengths). # # dotProduct = vtk.vtkProgrammableAttributeDataFilter() dotProduct.SetInputConnection(probe_velocity.GetOutputPort()) dotProduct.AddInput(probe_velocity.GetOutput()) dotProduct.AddInput(probe_gradient.GetOutput()) dotProduct.SetExecuteMethod(ExecuteDot) # # Create the mappers and actors. Note the call to GetPolyDataOutput when # setting up the mapper for the ProgrammableAttributeDataFilter # velocityMapper = vtk.vtkPolyDataMapper() velocityMapper.SetInputConnection(velocity.GetOutputPort()) velocityMapper.ScalarVisibilityOff() velocityActor = vtk.vtkLODActor() velocityActor.SetMapper(velocityMapper) velocityActor.SetNumberOfCloudPoints(1000) velocityActor.GetProperty().SetColor(1,0,0) pressureGradientMapper = vtk.vtkPolyDataMapper() pressureGradientMapper.SetInputConnection(pressureGradient.GetOutputPort()) pressureGradientMapper.ScalarVisibilityOff() pressureGradientActor = vtk.vtkLODActor() pressureGradientActor.SetMapper(pressureGradientMapper) pressureGradientActor.SetNumberOfCloudPoints(1000) pressureGradientActor.GetProperty().SetColor(0,1,0) dotMapper = vtk.vtkPolyDataMapper() dotMapper.SetInputConnection(dotProduct.GetOutputPort()) dotMapper.SetScalarRange(-1,1) dotActor = vtk.vtkLODActor() dotActor.SetMapper(dotMapper) dotActor.SetNumberOfCloudPoints(1000) # # The PLOT3DReader is used to draw the outline of the original dataset. 
# pl3d = vtk.vtkMultiBlockPLOT3DReader() pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin") pl3d.Update() pl3d_output = pl3d.GetOutput().GetBlock(0) outline = vtk.vtkStructuredGridOutlineFilter() outline.SetInputData(pl3d_output) outlineMapper = vtk.vtkPolyDataMapper() outlineMapper.SetInputConnection(outline.GetOutputPort()) outlineActor = vtk.vtkActor() outlineActor.SetMapper(outlineMapper) outlineActor.GetProperty().SetColor(0,0,0) # # Add the actors to the renderer, set the background and size # ren1.AddActor(outlineActor) ren1.AddActor(velocityActor) ren1.AddActor(pressureGradientActor) ren1.AddActor(dotActor) ren1.SetBackground(1,1,1) renWin.SetSize(500,500) #ren1 SetBackground 0.1 0.2 0.4 cam1 = ren1.GetActiveCamera() cam1.SetClippingRange(3.95297,50) cam1.SetFocalPoint(9.71821,0.458166,29.3999) cam1.SetPosition(-21.6807,-22.6387,35.9759) cam1.SetViewUp(-0.0158865,0.293715,0.955761) # render the image # renWin.Render() renWin.SetWindowName("Multidimensional Visualization Exercise") # prevent the tk window from showing up then start the event loop # --- end of script --
hlzz/dotfiles
graphics/VTK-7.0.0/Filters/Programmable/Testing/Python/MultidimensionalSolution.py
Python
bsd-3-clause
7,335
[ "VTK" ]
9baa1239e28cca6aef41c77e571f0d7b538018cfc1b41f31c4247c2a7c142dd1
#!/usr/bin/env python # Script using Biopython objects to import and adapt PDB files from Bio.PDB import * import numpy as np import requests import argparse parser=PDBParser(QUIET=True) #Protocol # 1) Read in arguments for downloading PDB files # 2) Read in table of PDB entries and CDR ranges # 3) Delete atoms in CDR ranges # 4) Delete Hetatms # 5) Print out CA atoms only def main(): ftp=False options=argparse.ArgumentParser(description='Processes and edits PDB files ready for overlay and PCA.') options.add_argument('-f','--ftp',action='store_true',help="Uses ftp downloads of structures from RCSB PDB") args=options.parse_args() ftp=args.ftp table=rangeimport("../Abysis/Light_FR_CDR_table_1.txt") heavyobs=[] for line in table: pdbid=line[0] if line[0].startswith('#'): break structure1=getpdb(ftp,pdbid) structure2=abnum(pdbid) cdrlist = [] cdrlist = line[2].split('-') cdrlist.extend(line[4].split('-')) cdrlist.extend(line[6].split('-')) cdrlist=np.array(map(int, cdrlist)) if structure1[0].has_id('L'): heavy1=structure1[0]['L'] elif structure1[0].has_id('A'): heavy1=structure1[0]['A'] else: print "PDB %s has no chain L or chain A. Skipping editing of light chain." % pdbid break try: heavy2=structure2[0]['L'] except KeyError: print "Cannot find chain L in Kabat numbered structure of %s.\n This PDB should be edited separately!" % pdbid # Delete Hetatm records for j in [ i for i in heavy1 if i.id[0] is not ' ' ]: heavy1.__delitem__(j.get_id()) heavy1=renum_heavy(heavy1,heavy2) heavy1=delheavy(cdrlist,heavy1) heavyobs.append([pdbid,heavy1]) # Find consensus start point of heavys heavyobs=chopstart(heavyobs) for i in heavyobs: io=PDBIO() io.set_structure(i[1]) io.save("Created/%s_L_edited.pdb" % i[0], CASelect()) # Delete residues, need to work out how to check for chain H or B etc. 
def getpdb(ftp,pdbid):
    """Return a Bio.PDB Structure for *pdbid*.

    If *ftp* is True, download the entry into Downloaded/ via PDBList;
    otherwise parse a local file named '<pdbid>.pdb' in the working dir.
    """
    if ftp is True:
        pdbl=PDBList()
        structure1=parser.get_structure(pdbid,pdbl.retrieve_pdb_file(pdbid,pdir="Downloaded"))
        return structure1
    else:
        pdbfile = pdbid + ".pdb"
        structure1=parser.get_structure(pdbid,pdbfile)
        return structure1

def abnum(pdbid):
    """Kabat-number *pdbid* via the AbNum web service.

    Posts ./Downloaded/pdb<pdbid>.ent with scheme '-k' (Kabat), writes the
    response to Created/<pdbid>_V_Kabat.pdb, and returns it parsed as a
    new Structure (id '<pdbid>2').
    """
    oldfn = "pdb" + pdbid.lower() + ".ent"
    newfn = "Created/" + pdbid + "_V_Kabat.pdb"
    # NOTE(review): this file handle is handed to requests and never closed.
    files={'pdb': open('./Downloaded/%s' % oldfn)}
    data={'scheme': '-k'}
    url='http://www.bioinf.org.uk/cgi-bin/abnum/abnumpdb.pl'
    r = requests.post(url,files=files,data=data)
    f = open(newfn, 'w')
    f.write(r.text)
    f.close()
    structure2=parser.get_structure(pdbid + "2",newfn)
    return structure2

def renum_heavy(heavy1,heavy2):
    """Graft Kabat numbering from chain *heavy2* onto chain *heavy1*.

    Mutates and returns *heavy1*.  WARNING: manipulates Bio.PDB Chain
    internals (child_dict / get_list) directly, bypassing the public API,
    and the steps below are strictly order-dependent.
    """
    # Function to renumber chain using kabat numbered chain
    # Must first renumber original chain to avoid duplicate resids
    # Then insert Kabat numbered chain
    # Then renumber residues sequentially following Kabat numbering
    # Step 1, renumber heavy1 chain. Must renumber child_list and child_dict
    # Start at 5001 so temporary ids cannot collide with Kabat numbers.
    count = 5001
    # print len(heavy1.child_dict)
    for residue in heavy1:
        origid = residue.id
        # print residue.id
        newid = (' ',count,' ')
        residue.id = newid
        heavy1.child_dict[newid] = heavy1.child_dict.pop(origid)
        # print heavy1.child_dict[newid]
        count += 1
    # Step 2, detach old numbers, insert Kabat numbers
    maxindex=len(heavy2)  # NOTE(review): assigned but never used below
    count = 0
    resids=[]
    # print len(heavy1.child_dict)
    for resid in heavy2.get_list():
        resids.append(resid.id)
    # Drop one leading residue of heavy1 for each residue heavy2 provides.
    while count < len(resids):
        heavy1.detach_child(heavy1.get_list()[0].id)
        count += 1
    # Prepend the Kabat-numbered residues in order.
    index = 0
    for residue in heavy2:
        heavy1.insert(index,residue)
        index += 1
    # Step 3, renumber res following Kabat numbers
    # Continue sequentially from the last Kabat residue number.
    count = int(heavy1.get_list()[index-1].id[1]) + 1
    while index < len(heavy1):
        origid = heavy1.get_list()[index].id
        newid = (' ',count,' ')
        heavy1.get_list()[index].id = newid
        heavy1.child_dict[newid] = heavy1.child_dict.pop(origid)
        index += 1
        count += 1
    return heavy1

def rangeimport(filename):
    """Load the CDR-range table from *filename* (9 header rows skipped)."""
    #Import table of CDR regions to delete
    table=np.genfromtxt(filename, skip_header=9, dtype=None)
    print "Now importing CDR ranges from file %s \n" % filename
    print "(The first entry looks like this):"
    print str(table[0])
    return table

def delheavy(cdrlist,heavy1):
    """Delete CDR residues from chain *heavy1* and return it.

    *cdrlist* holds three inclusive start/end pairs
    (L1 start, L1 end, L2 start, L2 end, L3 start, L3 end).
    Residue 10 is always removed as well when present.
    """
    #Delete CDR regions in heavy chain
    for delres in range(cdrlist[0],cdrlist[1]+1,1):
        if heavy1.has_id(delres):
            for j in [ i for i in heavy1 if i.id[1] == delres ]:
                heavy1.__delitem__(j.get_id())
    for delres in range(cdrlist[2],cdrlist[3]+1,1):
        if heavy1.has_id(delres):
            for j in [ i for i in heavy1 if i.id[1] == delres ]:
                heavy1.__delitem__(j.get_id())
    for delres in range(cdrlist[4],cdrlist[5]+1,1):
        if heavy1.has_id(delres):
            for j in [ i for i in heavy1 if i.id[1] == delres ]:
                heavy1.__delitem__(j.get_id())
    # Special-case: residue 10 is dropped unconditionally when present.
    if heavy1.has_id(10):
        print "Deleting residue 10 from PDB %s" % heavy1.get_parent().get_parent().get_id()
        for j in [ i for i in heavy1 if i.id[1] == 10 ]:
            heavy1.__delitem__(j.get_id())
    # Delete Hetatm records
    # for j in [ i for i in heavy1 if i.id[0] is not ' ' ]:
    #     heavy1.__delitem__(j.get_id())
    return heavy1

def chopstart(heavyobs):
    """Trim all chains in *heavyobs* to a common start and length.

    *heavyobs* is a list of [pdbid, chain] pairs.  First deletes residues
    numbered below the highest starting residue number seen across all
    chains, then truncates every chain from the end down to the shortest
    chain length.  Mutates the chains in place and returns *heavyobs*.
    """
    #Finds the chain with the highest number start point, chops all others to this length
    resid_list=[]
    min_n=int(1)
    for obj in heavyobs:
        resids=[]
        for resid in obj[1].get_list():
            resids.append(resid.id[1])
        resid_list.append(resids)
    # min_n becomes the largest first-residue number over all chains.
    for j in resid_list:
        if np.min(j) > min_n:
            min_n = np.min(j)
    print "Now chopping up to residue %i " % min_n
    for obj in heavyobs:
        for delres in range(1,min_n,1):
            if obj[1].has_id(delres):
                for j in [ i for i in obj[1] if i.id[1] == delres ]:
                    obj[1].__delitem__(j.get_id())
    # Now finds lengths of chains, chops to shortest one
    minlen = int(100000)
    maxres = int(1)
    # NOTE(review): maxres is computed but never used afterwards.
    for j in resid_list:
        if np.max(j) > maxres:
            maxres = np.max(j)
    for obj in heavyobs:
        if minlen > len(obj[1]):
            minlen = len(obj[1])
    # minres = obj[1][minlen].id[1]
    print "Now chopping all chains to be %i long " % minlen
    for obj in heavyobs:
        # Walk from the (one-past-the-end) index downward; the initial
        # out-of-range index is absorbed by the IndexError handler.
        for index in range(len(obj[1]),minlen-1,-1):
            try:
                # print "Now deleting %s residue %s" % (obj[0], obj[1].get_list()[index].id[1])
                obj[1].__delitem__(obj[1].get_list()[index].id[1])
            except IndexError:
                # print "Not editing ",obj[0]
                continue
    return heavyobs

# Only print out CA atoms. Need to define a Select class for this
class CASelect(Select):
    """PDBIO Select that keeps only CA atoms (altloc 'A' if disordered)."""
    def accept_atom(self, atom):
        if atom.get_name()=='CA':
            if not atom.is_disordered() or atom.get_altloc()=='A':
                return 1
            else:
                return 0
        else:
            return 0

# Main runs here
main()
rtb1c13/scripts
SAXS_MD/Light_delete_CDR.py
Python
gpl-2.0
6,763
[ "Biopython" ]
ffb8f0798a86dddfa654170bac4bbb5a515e5c2a2c7f201c55521d82dc4e4f57
import os
import glob

import mdtraj as md
import numpy as np

# Down-sample each DCD trajectory: stamp frame times (10 ps apart),
# keep every 5th frame, and save the result as XTC under strided/.
top = md.load('fs-peptide.pdb')

# Robustness fix: t.save() below fails if the output directory is
# missing, so create it up front (idempotent check keeps reruns safe).
if not os.path.isdir('strided'):
    os.makedirs('strided')

for fn in glob.glob('trajectory-*.dcd'):
    print(fn)
    t = md.load(fn, top=top)
    # the stride is 10 picoseconds
    t.time = np.arange(t.n_frames) * 10
    t = t[::5]
    t.save(os.path.join('strided', os.path.splitext(fn)[0] + '.xtc'))
msmbuilder/msmb_data
msmb_data/fs_peptide/post-process.py
Python
lgpl-2.1
344
[ "MDTraj" ]
b30c83d16797bfe5dc7e6fd74238788b6300846ae4eabe53af2244b449137eef
STATES = ( ('AK', 'Alaska'), ('AL', 'Alabama'), ('AR', 'Arkansas'), ('AZ', 'Arizona'), ('CA', 'California'), ('CO', 'Colorado'), ('CT', 'Connecticut'), ('DC', 'District of Columbia'), ('DE', 'Delaware'), ('FL', 'Florida'), ('GA', 'Georgia'), ('HI', 'Hawaii'), ('IA', 'Iowa'), ('ID', 'Idaho'), ('IL', 'Illinois'), ('IN', 'Indiana'), ('KS', 'Kansas'), ('KY', 'Kentucky'), ('LA', 'Louisiana'), ('MA', 'Massachusetts'), ('MD', 'Maryland'), ('ME', 'Maine'), ('MI', 'Michigan'), ('MN', 'Minnesota'), ('MO', 'Missouri'), ('MS', 'Mississippi'), ('MT', 'Montana'), ('NC', 'North Carolina'), ('ND', 'North Dakota'), ('NE', 'Nebraska'), ('NH', 'New Hampshire'), ('NJ', 'New Jersey'), ('NM', 'New Mexico'), ('NV', 'Nevada'), ('NY', 'New York'), ('OH', 'Ohio'), ('OK', 'Oklahoma'), ('OR', 'Oregon'), ('PA', 'Pennsylvania'), ('RI', 'Rhode Island'), ('SC', 'South Carolina'), ('SD', 'South Dakota'), ('TN', 'Tennessee'), ('TX', 'Texas'), ('UT', 'Utah'), ('VA', 'Virginia'), ('VI', 'Virgin Islands'), ('VT', 'Vermont'), ('WA', 'Washington'), ('WI', 'Wisconsin'), ('WV', 'West Virginia'), ('WY', 'Wyoming'), ) NUM_STATES = len(STATES) STREET_SUFFIX = ( 'Alley', 'Annex', 'Arcade', 'Avenue', 'Bend', 'Bay', 'Brae', 'Boulevard', 'Bypass', 'Circle', 'Close', 'Concession', 'Court', 'Cove', 'Crescent', 'Drive', 'Drung', 'Esplanade', 'Expressway', 'Extension', 'Ferry', 'Field', 'Freeway', 'Garden', 'Gardens', 'Gate', 'Glen', 'Green', 'Grove', 'Heights', 'High', 'Highway', 'Hill', 'Lane', 'Line', 'Loop', 'Mall', 'Manor', 'Mews', 'Nene', 'Parade', 'Park', 'Parkway', 'Path', 'Pike', 'Place', 'Plantation', 'Plaza', 'Point', 'Private', 'Promenade', 'Road', 'Side', 'Sideline', 'Route', 'Row', 'Run', 'Spur', 'Square', 'Stravenue', 'Street', 'Terrace', 'Thruway', 'Trace', 'Trail', 'Turnpike', 'Townline', 'Viaduct', 'Walk', 'Way', 'Wood', 'Wynd', ) NUM_STREET_SUFFIXES = len(STREET_SUFFIX) YEARS = 1998, 1999, 2000, 2001, 2002 GENDERS = 'M', 'F' MARITAL_STATUSES = 'D', 'M', 'S', 'U', 'W' EDUCATION_STATUSES = ( '2 yr 
Degree', '4 yr Degree', 'Advanced Degree', 'College', 'Primary', 'Secondary', 'Unknown', ) CATEGORIES = ( 'Books', 'Children', 'Electronics', 'Home', 'Jewelry', 'Men', 'Music', 'Shoes', 'Sports', 'Women', ) COUNTIES = ( 'Abbeville County', 'Acadia Parish', 'Accomack County', 'Ada County', 'Adair County', 'Adams County', 'Addison County', 'Aiken County', 'Aitkin County', 'Alachua County', 'Alamance County', 'Alameda County', 'Alamosa County', 'Albany County', 'Albemarle County', 'Alcona County', 'Alcorn County', 'Aleutians East Borough', 'Aleutians West Census Area', 'Alexander County', 'Alexandria city', 'Alfalfa County', 'Alger County', 'Allamakee County', 'Allegan County', 'Allegany County', 'Alleghany County', 'Allegheny County', 'Allen County', 'Allendale County', 'Allen Parish', 'Alpena County', 'Alpine County', 'Amador County', 'Amelia County', 'Amherst County', 'Amite County', 'Anchorage Borough', 'Anderson County', 'Andrew County', 'Andrews County', 'Androscoggin County', 'Angelina County', 'Anne Arundel County', 'Anoka County', 'Anson County', 'Antelope County', 'Antrim County', 'Apache County', 'Appanoose County', 'Appling County', 'Appomattox County', 'Aransas County', 'Arapahoe County', 'Archer County', 'Archuleta County', 'Arenac County', 'Arkansas County', 'Arlington County', 'Armstrong County', 'Aroostook County', 'Arthur County', 'Ascension Parish', 'Ashe County', 'Ashland County', 'Ashley County', 'Ashtabula County', 'Asotin County', 'Assumption Parish', 'Atascosa County', 'Atchison County', 'Athens County', 'Atkinson County', 'Atlantic County', 'Atoka County', 'Attala County', 'Audrain County', 'Audubon County', 'Auglaize County', 'Augusta County', 'Aurora County', 'Austin County', 'Autauga County', 'Avery County', 'Avoyelles Parish', 'Baca County', 'Bacon County', 'Bailey County', 'Baker County', 'Baldwin County', 'Ballard County', 'Baltimore city', 'Baltimore County', 'Bamberg County', 'Bandera County', 'Banks County', 'Banner County', 'Bannock 
County', 'Baraga County', 'Barber County', 'Barbour County', 'Barnes County', 'Barnstable County', 'Barnwell County', 'Barren County', 'Barron County', 'Barrow County', 'Barry County', 'Bartholomew County', 'Barton County', 'Bartow County', 'Bastrop County', 'Bates County', 'Bath County', 'Baxter County', 'Bay County', 'Bayfield County', 'Baylor County', 'Beadle County', 'Bear Lake County', 'Beaufort County', 'Beauregard Parish', 'Beaver County', 'Beaverhead County', 'Becker County', 'Beckham County', 'Bedford city', 'Bedford County', 'Bee County', 'Belknap County', 'Bell County', 'Belmont County', 'Beltrami County', 'Benewah County', 'Ben Hill County', 'Bennett County', 'Bennington County', 'Benson County', 'Bent County', 'Benton County', 'Benzie County', 'Bergen County', 'Berkeley County', 'Berks County', 'Berkshire County', 'Bernalillo County', 'Berrien County', 'Bertie County', 'Bethel Census Area', 'Bexar County', 'Bibb County', 'Bienville Parish', 'Big Horn County', 'Big Stone County', 'Billings County', 'Bingham County', 'Blackford County', 'Black Hawk County', 'Bladen County', 'Blaine County', 'Blair County', 'Blanco County', 'Bland County', 'Bleckley County', 'Bledsoe County', 'Blount County', 'Blue Earth County', 'Boise County', 'Bolivar County', 'Bollinger County', 'Bond County', 'Bon Homme County', 'Bonner County', 'Bonneville County', 'Boone County', 'Borden County', 'Bosque County', 'Bossier Parish', 'Botetourt County', 'Bottineau County', 'Boulder County', 'Boundary County', 'Bourbon County', 'Bowie County', 'Bowman County', 'Box Butte County', 'Box Elder County', 'Boyd County', 'Boyle County', 'Bracken County', 'Bradford County', 'Bradley County', 'Branch County', 'Brantley County', 'Braxton County', 'Brazoria County', 'Brazos County', 'Breathitt County', 'Breckinridge County', 'Bremer County', 'Brevard County', 'Brewster County', 'Briscoe County', 'Bristol Bay Borough', 'Bristol city', 'Bristol County', 'Broadwater County', 'Bronx County', 'Brooke 
County', 'Brookings County', 'Brooks County', 'Broome County', 'Broward County', 'Brown County', 'Brule County', 'Brunswick County', 'Bryan County', 'Buchanan County', 'Buckingham County', 'Bucks County', 'Buena Vista city', 'Buena Vista County', 'Buffalo County', 'Bullitt County', 'Bulloch County', 'Bullock County', 'Buncombe County', 'Bureau County', 'Burke County', 'Burleigh County', 'Burleson County', 'Burlington County', 'Burnet County', 'Burnett County', 'Burt County', 'Butler County', 'Butte County', 'Butts County', 'Cabarrus County', 'Cabell County', 'Cache County', 'Caddo County', 'Caddo Parish', 'Calaveras County', 'Calcasieu Parish', 'Caldwell County', 'Caldwell Parish', 'Caledonia County', 'Calhoun County', 'Callahan County', 'Callaway County', 'Calloway County', 'Calumet County', 'Calvert County', 'Camas County', 'Cambria County', 'Camden County', 'Cameron County', 'Cameron Parish', 'Campbell County', 'Camp County', 'Canadian County', 'Candler County', 'Cannon County', 'Canyon County', 'Cape Girardeau County', 'Cape May County', 'Carbon County', 'Caribou County', 'Carlisle County', 'Carlton County', 'Caroline County', 'Carroll County', 'Carson City', 'Carson County', 'Carter County', 'Carteret County', 'Carver County', 'Cascade County', 'Casey County', 'Cass County', 'Cassia County', 'Castro County', 'Caswell County', 'Catahoula Parish', 'Catawba County', 'Catoosa County', 'Catron County', 'Cattaraugus County', 'Cavalier County', 'Cayuga County', 'Cecil County', 'Cedar County', 'Centre County', 'Cerro Gordo County', 'Chaffee County', 'Chambers County', 'Champaign County', 'Chariton County', 'Charle', 'Charles City County', 'Charles County', 'Charles Mix County', 'Charleston County', 'Charlevoix County', 'Charlotte County', 'Charlottesville city', 'Charlton County', 'Chase County', 'Chatham County', 'Chattahoochee County', 'Chattooga County', 'Chautauqua County', 'Chaves County', 'Cheatham County', 'Cheboygan County', 'Chelan County', 'Chemung County', 
'Chenango County', 'Cherokee County', 'Cherry County', 'Chesapeake city', 'Cheshire County', 'Chester County', 'Chesterfield County', 'Cheyenne County', 'Chickasaw County', 'Chicot County', 'Childress County', 'Chilton County', 'Chippewa County', 'Chisago County', 'Chittenden County', 'Choctaw County', 'Chouteau County', 'Chowan County', 'Christian County', 'Churchill County', 'Cibola County', 'Cimarron County', 'Citrus County', 'Clackamas County', 'Claiborne County', 'Claiborne Parish', 'Clallam County', 'Clare County', 'Clarendon County', 'Clarion County', 'Clark County', 'Clarke County', 'Clatsop County', 'Clay County', 'Clayton County', 'Clear Creek County', 'Clearfield County', 'Clearwater County', 'Cleburne County', 'Clermont County', 'Cleveland County', 'Clifton Forge city', 'Clinch County', 'Clinton County', 'Cloud County', 'Coahoma County', 'Coal County', 'Cobb County', 'Cochise County', 'Cochran County', 'Cocke County', 'Coconino County', 'Codington County', 'Coffee County', 'Coffey County', 'Coke County', 'Colbert County', 'Cole County', 'Coleman County', 'Coles County', 'Colfax County', 'Colleton County', 'Collier County', 'Collin County', 'Collingsworth County', 'Colonial Heights city', 'Colorado County', 'Colquitt County', 'Columbia County', 'Columbiana County', 'Columbus County', 'Colusa County', 'Comal County', 'Comanche County', 'Concho County', 'Concordia Parish', 'Conecuh County', 'Conejos County', 'Contra Costa County', 'Converse County', 'Conway County', 'Cook County', 'Cooke County', 'Cooper County', 'Coosa County', 'Coos County', 'Copiah County', 'Corson County', 'Cortland County', 'Coryell County', 'Coshocton County', 'Costilla County', 'Cottle County', 'Cotton County', 'Cottonwood County', 'Covington city', 'Covington County', 'Coweta County', 'Cowley County', 'Cowlitz County', 'Craig County', 'Craighead County', 'Crane County', 'Craven County', 'Crawford County', 'Creek County', 'Crenshaw County', 'Crisp County', 'Crittenden County', 
'Crockett County', 'Crook County', 'Crosby County', 'Cross County', 'Crowley County', 'Crow Wing County', 'Culberson County', 'Cullman County', 'Culpeper County', 'Cumberland County', 'Cuming County', 'Currituck County', 'Curry County', 'Custer County', 'Cuyahoga County', 'Dade County', 'Daggett County', 'Dakota County', 'Dale County', 'Dallam County', 'Dallas County', 'Dane County', 'Daniels County', 'Danville city', 'Dare County', 'Darke County', 'Darlington County', 'Dauphin County', 'Davidson County', 'Davie County', 'Daviess County', 'Davis County', 'Davison County', 'Dawes County', 'Dawson County', 'Day County', 'Deaf Smith County', 'Dearborn County', 'DeBaca County', 'Decatur County', 'Deer Lodge County', 'Defiance County', 'De Kalb County', 'DeKalb County', 'Delaware County', 'Del Norte County', 'Delta County', 'Denali Borough', 'Dent County', 'Denton County', 'Denver County', 'Deschutes County', 'Desha County', 'Des Moines County', 'DeSoto County', 'De Soto Parish', 'Deuel County', 'Dewey County', 'De Witt County', 'DeWitt County', 'Dickens County', 'Dickenson County', 'Dickey County', 'Dickinson County', 'Dickson County', 'Dillingham Census Area', 'Dillon County', 'Dimmit County', 'Dinwiddie County', 'District of Columbia', 'Divide County', 'Dixie County', 'Dixon County', 'Doddridge County', 'Dodge County', 'Dolores County', 'Dona Ana County', 'Doniphan County', 'Donley County', 'Dooly County', 'Door County', 'Dorchester County', 'Dougherty County', 'Douglas County', 'Drew County', 'Dubois County', 'Dubuque County', 'Duchesne County', 'Dukes County', 'Dundy County', 'Dunklin County', 'Dunn County', 'DuPage County', 'Duplin County', 'Durham County', 'Dutchess County', 'Duval County', 'Dyer County', 'Eagle County', 'Early County', 'East Baton Rouge Parish', 'East Carroll Parish', 'East Feliciana Parish', 'Eastland County', 'Eaton County', 'Eau Claire County', 'Echols County', 'Ector County', 'Eddy County', 'Edgar County', 'Edgecombe County', 'Edgefield 
County', 'Edmonson County', 'Edmunds County', 'Edwards County', 'Effingham County', 'Elbert County', 'El Dorado County', 'Elk County', 'Elkhart County', 'Elko County', 'Elliott County', 'Ellis County', 'Ellsworth County', 'Elmore County', 'El Paso County', 'Emanuel County', 'Emery County', 'Emmet County', 'Emmons County', 'Emporia city', 'Erath County', 'Erie County', 'Escambia County', 'Esmeralda County', 'Essex County', 'Estill County', 'Etowah County', 'Eureka County', 'Evangeline Parish', 'Evans County', 'Fairbanks North Star Borough', 'Fairfax city', 'Fairfax County', 'Fairfield County', 'Fallon County', 'Fall River County', 'Falls Church city', 'Falls County', 'Fannin County', 'Faribault County', 'Faulk County', 'Faulkner County', 'Fauquier County', 'Fayette County', 'Fentress County', 'Fergus County', 'Ferry County', 'Fillmore County', 'Finney County', 'Fisher County', 'Flagler County', 'Flathead County', 'Fleming County', 'Florence County', 'Floyd County', 'Fluvanna County', 'Foard County', 'Fond du Lac County', 'Ford County', 'Forest County', 'Forrest County', 'Forsyth County', 'Fort Bend County', 'Foster County', 'Fountain County', 'Franklin city', 'Franklin County', 'Franklin Parish', 'Frederick County', 'Fredericksburg city', 'Freeborn County', 'Freestone County', 'Fremont County', 'Fresno County', 'Frio County', 'Frontier County', 'Fulton County', 'Furnas County', 'Gadsden County', 'Gage County', 'Gaines County', 'Galax city', 'Gallatin County', 'Gallia County', 'Galveston County', 'Garden County', 'Garfield County', 'Garland County', 'Garrard County', 'Garrett County', 'Garvin County', 'Garza County', 'Gasconade County', 'Gaston County', 'Gates County', 'Geary County', 'Geauga County', 'Gem County', 'Genesee County', 'Geneva County', 'Gentry County', 'George County', 'Georgetown County', 'Gibson County', 'Gila County', 'Gilchrist County', 'Giles County', 'Gillespie County', 'Gilliam County', 'Gilmer County', 'Gilpin County', 'Glacier County', 'Glades 
County', 'Gladwin County', 'Glascock County', 'Glasscock County', 'Glenn County', 'Gloucester County', 'Glynn County', 'Gogebic County', 'Golden Valley County', 'Goliad County', 'Gonzales County', 'Goochland County', 'Goodhue County', 'Gooding County', 'Gordon County', 'Goshen County', 'Gosper County', 'Gove County', 'Grady County', 'Grafton County', 'Graham County', 'Grainger County', 'Grand County', 'Grand Forks County', 'Grand Isle County', 'Grand Traverse County', 'Granite County', 'Grant County', 'Grant Parish', 'Granville County', 'Gratiot County', 'Graves County', 'Gray County', 'Grays Harbor County', 'Grayson County', 'Greeley County', 'Greenbrier County', 'Green County', 'Greene County', 'Green Lake County', 'Greenlee County', 'Greensville County', 'Greenup County', 'Greenville County', 'Greenwood County', 'Greer County', 'Gregg County', 'Gregory County', 'Grenada County', 'Griggs County', 'Grimes County', 'Grundy County', 'Guadalupe County', 'Guernsey County', 'Guilford County', 'Gulf County', 'Gunnison County', 'Guthrie County', 'Gwinnett County', 'Haakon County', 'Habersham County', 'Haines Borough', 'Hale County', 'Halifax County', 'Hall County', 'Hamblen County', 'Hamilton County', 'Hamlin County', 'Hampden County', 'Hampshire County', 'Hampton city', 'Hampton County', 'Hancock County', 'Hand County', 'Hanover County', 'Hansford County', 'Hanson County', 'Haralson County', 'Hardee County', 'Hardeman County', 'Hardin County', 'Harding County', 'Hardy County', 'Harford County', 'Harlan County', 'Harmon County', 'Harnett County', 'Harney County', 'Harper County', 'Harris County', 'Harrisonburg city', 'Harrison County', 'Hart County', 'Hartford County', 'Hartley County', 'Harvey County', 'Haskell County', 'Hawaii County', 'Hawkins County', 'Hayes County', 'Hays County', 'Haywood County', 'Heard County', 'Hemphill County', 'Hempstead County', 'Henderson County', 'Hendricks County', 'Hendry County', 'Hennepin County', 'Henrico County', 'Henry County', 
'Herkimer County', 'Hernando County', 'Hertford County', 'Hettinger County', 'Hickman County', 'Hickory County', 'Hidalgo County', 'Highland County', 'Highlands County', 'Hill County', 'Hillsborough County', 'Hillsdale County', 'Hinds County', 'Hinsdale County', 'Hitchcock County', 'Hocking County', 'Hockley County', 'Hodgeman County', 'Hoke County', 'Holmes County', 'Holt County', 'Honolulu County', 'Hood County', 'Hood River County', 'Hooker County', 'Hopewell city', 'Hopkins County', 'Horry County', 'Hot Spring County', 'Hot Springs County', 'Houghton County', 'Houston County', 'Howard County', 'Howell County', 'Hubbard County', 'Hudson County', 'Hudspeth County', 'Huerfano County', 'Hughes County', 'Humboldt County', 'Humphreys County', 'Hunt County', 'Hunterdon County', 'Huntingdon County', 'Huntington County', 'Huron County', 'Hutchinson County', 'Hyde County', 'Iberia Parish', 'Iberville Parish', 'Ida County', 'Idaho County', 'Imperial County', 'Independence County', 'Indiana County', 'Indian River County', 'Ingham County', 'Inyo County', 'Ionia County', 'Iosco County', 'Iowa County', 'Iredell County', 'Irion County', 'Iron County', 'Iroquois County', 'Irwin County', 'Isabella County', 'Isanti County', 'Island County', 'Isle of Wight County', 'Issaquena County', 'Itasca County', 'Itawamba County', 'Izard County', 'Jack County', 'Jackson County', 'Jackson Parish', 'James City County', 'Jasper County', 'Jay County', 'Jeff Davis County', 'Jefferson County', 'Jefferson Davis County', 'Jefferson Davis Parish', 'Jefferson Parish', 'Jenkins County', 'Jennings County', 'Jerauld County', 'Jerome County', 'Jersey County', 'Jessamine County', 'Jewell County', 'Jim Hogg County', 'Jim Wells County', 'Jo Daviess County', 'Johnson County', 'Johnston County', 'Jones County', 'Josephine County', 'Juab County', 'Judith Basin County', 'Juneau Borough', 'Juneau County', 'Juniata County', 'Kalamazoo County', 'Kalkaska County', 'Kanabec County', 'Kanawha County', 'Kandiyohi 
County', 'Kane County', 'Kankakee County', 'Karnes County', 'Kauai County', 'Kaufman County', 'Kay County', 'Kearney County', 'Kearny County', 'Keith County', 'Kemper County', 'Kenai Peninsula Borough', 'Kendall County', 'Kenedy County', 'Kennebec County', 'Kenosha County', 'Kent County', 'Kenton County', 'Keokuk County', 'Kern County', 'Kerr County', 'Kershaw County', 'Ketchikan Gateway Borough', 'Kewaunee County', 'Keweenaw County', 'Keya Paha County', 'Kidder County', 'Kimball County', 'Kimble County', 'King and Queen County', 'King County', 'Kingfisher County', 'King George County', 'Kingman County', 'Kingsbury County', 'Kings County', 'King William County', 'Kinney County', 'Kiowa County', 'Kit Carson County', 'Kitsap County', 'Kittitas County', 'Kittson County', 'Klamath County', 'Kleberg County', 'Klickitat County', 'Knott County', 'Knox County', 'Kodiak Island Borough', 'Koochiching County', 'Kootenai County', 'Kosciusko County', 'Kossuth County', 'Labette County', 'Lackawanna County', 'Laclede County', 'Lac qui Parle County', 'La Crosse County', 'Lafayette County', 'Lafayette Parish', 'Lafourche Parish', 'Lagrange County', 'Lake and Peninsula Borough', 'Lake County', 'Lake of the Woods County', 'Lamar County', 'Lamb County', 'Lamoille County', 'LaMoure County', 'Lampasas County', 'Lancaster County', 'Lander County', 'Lane County', 'Langlade County', 'Lanier County', 'La Paz County', 'Lapeer County', 'La Plata County', 'La Porte County', 'Laramie County', 'Larimer County', 'Larue County', 'La Salle County', 'La Salle Parish', 'Las Animas County', 'Lassen County', 'Latah County', 'Latimer County', 'Lauderdale County', 'Laurel County', 'Laurens County', 'Lavaca County', 'Lawrence County', 'Lea County', 'Leake County', 'Leavenworth County', 'Lebanon County', 'Lee County', 'Leelanau County', 'Leflore County', 'Le Flore County', 'Lehigh County', 'Lemhi County', 'Lenawee County', 'Lenoir County', 'Leon County', 'Leslie County', 'Le Sueur County', 'Letcher 
County', 'Levy County', 'Lewis and Clark County', 'Lewis County', 'Lexington city', 'Lexington County', 'Liberty County', 'Licking County', 'Limestone County', 'Lincoln County', 'Lincoln Parish', 'Linn County', 'Lipscomb County', 'Litchfield County', 'Little River County', 'Live Oak County', 'Livingston County', 'Livingston Parish', 'Llano County', 'Logan County', 'Long County', 'Lonoke County', 'Lorain County', 'Los Alamos County', 'Los Angeles County', 'Loudon County', 'Loudoun County', 'Louisa County', 'Loup County', 'Love County', 'Loving County', 'Lowndes County', 'Lubbock County', 'Lucas County', 'Luce County', 'Lumpkin County', 'Luna County', 'Lunenburg County', 'Luzerne County', 'Lycoming County', 'Lyman County', 'Lynchburg city', 'Lynn County', 'Lyon County', 'Mackinac County', 'Macomb County', 'Macon County', 'Macoupin County', 'Madera County', 'Madison County', 'Madison Parish', 'Magoffin County', 'Mahaska County', 'Mahnomen County', 'Mahoning County', 'Major County', 'Malheur County', 'Manassas city', 'Manassas Park city', 'Manatee County', 'Manistee County', 'Manitowoc County', 'Marathon County', 'Marengo County', 'Maricopa County', 'Maries County', 'Marin County', 'Marinette County', 'Marion County', 'Mariposa County', 'Marlboro County', 'Marquette County', 'Marshall County', 'Martin County', 'Martinsville city', 'Mason County', 'Massac County', 'Matagorda County', 'Matanuska-Susitna Borough', 'Mathews County', 'Maui County', 'Maury County', 'Maverick County', 'Mayes County', 'McClain County', 'McCone County', 'McCook County', 'McCormick County', 'McCracken County', 'McCreary County', 'McCulloch County', 'McCurtain County', 'McDonald County', 'McDonough County', 'McDowell County', 'McDuffie County', 'McHenry County', 'McIntosh County', 'McKean County', 'McKenzie County', 'McKinley County', 'McLean County', 'McLennan County', 'McLeod County', 'McMinn County', 'McMullen County', 'McNairy County', 'McPherson County', 'Meade County', 'Meagher County', 
'Mecklenburg County', 'Mecosta County', 'Medina County', 'Meeker County', 'Meigs County', 'Mellette County', 'Menard County', 'Mendocino County', 'Menifee County', 'Menominee County', 'Merced County', 'Mercer County', 'Meriwether County', 'Merrick County', 'Merrimack County', 'Mesa County', 'Metcalfe County', 'Miami County', 'Middlesex County', 'Midland County', 'Mifflin County', 'Milam County', 'Millard County', 'Mille Lacs County', 'Miller County', 'Mills County', 'Milwaukee County', 'Mineral County', 'Miner County', 'Mingo County', 'Minidoka County', 'Minnehaha County', 'Missaukee County', 'Mississippi County', 'Missoula County', 'Mitchell County', 'Mobile County', 'Modoc County', 'Moffat County', 'Mohave County', 'Moniteau County', 'Monmouth County', 'Mono County', 'Monona County', 'Monongalia County', 'Monroe County', 'Montague County', 'Montcalm County', 'Monterey County', 'Montezuma County', 'Montgomery County', 'Montmorency County', 'Montour County', 'Montrose County', 'Moody County', 'Moore County', 'Mora County', 'Morehouse Parish', 'Morgan County', 'Morrill County', 'Morris County', 'Morrison County', 'Morrow County', 'Morton County', 'Motley County', 'Moultrie County', 'Mountrail County', 'Mower County', 'Muhlenberg County', 'Multnomah County', 'Murray County', 'Muscatine County', 'Muscogee County', 'Muskegon County', 'Muskingum County', 'Muskogee County', 'Musselshell County', 'Nacogdoches County', 'Nance County', 'Nantucket County', 'Napa County', 'Nash County', 'Nassau County', 'Natchitoches Parish', 'Natrona County', 'Navajo County', 'Navarro County', 'Nelson County', 'Nemaha County', 'Neosho County', 'Neshoba County', 'Ness County', 'Nevada County', 'Newaygo County', 'Newberry County', 'New Castle County', 'New Hanover County', 'New Haven County', 'New Kent County', 'New London County', 'New Madrid County', 'Newport County', 'Newport News city', 'Newton County', 'New York County', 'Nez Perce County', 'Niagara County', 'Nicholas County', 'Nicollet 
County', 'Niobrara County', 'Noble County', 'Nobles County', 'Nodaway County', 'Nolan County', 'Nome Census Area', 'Norfolk city', 'Norfolk County', 'Norman County', 'Northampton County', 'North Slope Borough', 'Northumberland County', 'Northwest Arctic Borough', 'Norton city', 'Norton County', 'Nottoway County', 'Nowata County', 'Noxubee County', 'Nuckolls County', 'Nueces County', 'Nye County', 'Oakland County', 'Obion County', 'O-Brien County', 'Oceana County', 'Ocean County', 'Ochiltree County', 'Oconee County', 'Oconto County', 'Ogemaw County', 'Ogle County', 'Oglethorpe County', 'Ohio County', 'Okaloosa County', 'Okanogan County', 'Okeechobee County', 'Okfuskee County', 'Oklahoma County', 'Okmulgee County', 'Oktibbeha County', 'Oldham County', 'Oliver County', 'Olmsted County', 'Oneida County', 'Onondaga County', 'Onslow County', 'Ontario County', 'Ontonagon County', 'Orangeburg County', 'Orange County', 'Oregon County', 'Orleans County', 'Orleans Parish', 'Osage County', 'Osborne County', 'Osceola County', 'Oscoda County', 'Oswego County', 'Otero County', 'Otoe County', 'Otsego County', 'Ottawa County', 'Otter Tail County', 'Ouachita County', 'Ouachita Parish', 'Ouray County', 'Outagamie County', 'Overton County', 'Owen County', 'Owsley County', 'Owyhee County', 'Oxford County', 'Ozark County', 'Ozaukee County', 'Pacific County', 'Page County', 'Palm Beach County', 'Palo Alto County', 'Palo Pinto County', 'Pamlico County', 'Panola County', 'Park County', 'Parke County', 'Parker County', 'Parmer County', 'Pasco County', 'Pasquotank County', 'Passaic County', 'Patrick County', 'Paulding County', 'Pawnee County', 'Payette County', 'Payne County', 'Peach County', 'Pearl River County', 'Pecos County', 'Pembina County', 'Pemiscot County', 'Pender County', 'Pendleton County', 'Pend Oreille County', 'Pennington County', 'Penobscot County', 'Peoria County', 'Pepin County', 'Perkins County', 'Perquimans County', 'Perry County', 'Pershing County', 'Person County', 
'Petersburg city', 'Petroleum County', 'Pettis County', 'Phelps County', 'Philadelphia County', 'Phillips County', 'Piatt County', 'Pickaway County', 'Pickens County', 'Pickett County', 'Pierce County', 'Pike County', 'Pima County', 'Pinal County', 'Pine County', 'Pinellas County', 'Pipestone County', 'Piscataquis County', 'Pitkin County', 'Pitt County', 'Pittsburg County', 'Pittsylvania County', 'Piute County', 'Placer County', 'Plaquemines Parish', 'Platte County', 'Pleasants County', 'Plumas County', 'Plymouth County', 'Pocahontas County', 'Poinsett County', 'Pointe Coupee Parish', 'Polk County', 'Pondera County', 'Pontotoc County', 'Pope County', 'Poquoson city', 'Portage County', 'Porter County', 'Portsmouth city', 'Posey County', 'Pottawatomie County', 'Pottawattamie County', 'Potter County', 'Powder River County', 'Powell County', 'Power County', 'Poweshiek County', 'Powhatan County', 'Prairie County', 'Pratt County', 'Preble County', 'Prentiss County', 'Presidio County', 'Presque Isle County', 'Preston County', 'Price County', 'Prince Edward County', 'Prince George County', 'Prince William County', 'Providence County', 'Prowers County', 'Pueblo County', 'Pulaski County', 'Pushmataha County', 'Putnam County', 'Quay County', 'Queen Anne County', 'Queens County', 'Quitman County', 'Rabun County', 'Racine County', 'Radford city', 'Rains County', 'Raleigh County', 'Ralls County', 'Ramsey County', 'Randall County', 'Randolph County', 'Rankin County', 'Ransom County', 'Rapides Parish', 'Rappahannock County', 'Ravalli County', 'Rawlins County', 'Ray County', 'Reagan County', 'Real County', 'Red Lake County', 'Red River County', 'Red River Parish', 'Red Willow County', 'Redwood County', 'Reeves County', 'Refugio County', 'Reno County', 'Rensselaer County', 'Renville County', 'Republic County', 'Reynolds County', 'Rhea County', 'Rice County', 'Richardson County', 'Rich County', 'Richland County', 'Richland Parish', 'Richmond city', 'Richmond County', 'Riley County', 
'Ringgold County', 'Rio Arriba County', 'Rio Blanco County', 'Rio Grande County', 'Ripley County', 'Ritchie County', 'Riverside County', 'Roane County', 'Roanoke city', 'Roanoke County', 'Roberts County', 'Robertson County', 'Robeson County', 'Rockbridge County', 'Rockcastle County', 'Rock County', 'Rockdale County', 'Rockingham County', 'Rock Island County', 'Rockland County', 'Rockwall County', 'Roger Mills County', 'Rogers County', 'Rolette County', 'Rooks County', 'Roosevelt County', 'Roscommon County', 'Roseau County', 'Rosebud County', 'Ross County', 'Routt County', 'Rowan County', 'Runnels County', 'Rush County', 'Rusk County', 'Russell County', 'Rutherford County', 'Rutland County', 'Sabine County', 'Sabine Parish', 'Sac County', 'Sacramento County', 'Sagadahoc County', 'Saginaw County', 'Saguache County', 'Salem city', 'Salem County', 'Saline County', 'Salt Lake County', 'Saluda County', 'Sampson County', 'San Augustine County', 'San Benito County', 'San Bernardino County', 'Sanborn County', 'Sanders County', 'San Diego County', 'Sandoval County', 'Sandusky County', 'San Francisco County', 'Sangamon County', 'Sanilac County', 'San Jacinto County', 'San Joaquin County', 'San Juan County', 'San Luis Obispo County', 'San Mateo County', 'San Miguel County', 'San Patricio County', 'Sanpete County', 'San Saba County', 'Santa Barbara County', 'Santa Clara County', 'Santa Cruz County', 'Santa Fe County', 'Santa Rosa County', 'Sarasota County', 'Saratoga County', 'Sargent County', 'Sarpy County', 'Sauk County', 'Saunders County', 'Sawyer County', 'Schenectady County', 'Schleicher County', 'Schley County', 'Schoharie County', 'Schoolcraft County', 'Schuyler County', 'Schuylkill County', 'Scioto County', 'Scotland County', 'Scott County', 'Scotts Bluff County', 'Screven County', 'Scurry County', 'Searcy County', 'Sebastian County', 'Sedgwick County', 'Seminole County', 'Seneca County', 'Sequatchie County', 'Sequoyah County', 'Sevier County', 'Seward County', 
'Shackelford County', 'Shannon County', 'Sharkey County', 'Sharp County', 'Shasta County', 'Shawano County', 'Shawnee County', 'Sheboygan County', 'Shelby County', 'Shenandoah County', 'Sherburne County', 'Sheridan County', 'Sherman County', 'Shiawassee County', 'Shoshone County', 'Sibley County', 'Sierra County', 'Silver Bow County', 'Simpson County', 'Sioux County', 'Siskiyou County', 'Sitka Borough', 'Skagit County', 'Skamania County', 'Slope County', 'Smith County', 'Smyth County', 'Snohomish County', 'Snyder County', 'Socorro County', 'Solano County', 'Somerset County', 'Somervell County', 'Sonoma County', 'Southampton County', 'Spalding County', 'Spartanburg County', 'Spencer County', 'Spink County', 'Spokane County', 'Spotsylvania County', 'Stafford County', 'Stanislaus County', 'Stanley County', 'Stanly County', 'Stanton County', 'Stark County', 'Starke County', 'Starr County', 'Staunton city', 'Stearns County', 'Steele County', 'Stephens County', 'Stephenson County', 'Sterling County', 'Steuben County', 'Stevens County', 'Stewart County', 'Stillwater County', 'Stoddard County', 'Stokes County', 'Stone County', 'Stonewall County', 'Storey County', 'Story County', 'Strafford County', 'Stutsman County', 'Sublette County', 'Suffolk city', 'Suffolk County', 'Sullivan County', 'Sully County', 'Summers County', 'Summit County', 'Sumner County', 'Sumter County', 'Sunflower County', 'Surry County', 'Susquehanna County', 'Sussex County', 'Sutter County', 'Sutton County', 'Suwannee County', 'Swain County', 'Sweet Grass County', 'Sweetwater County', 'Swift County', 'Swisher County', 'Switzerland County', 'Talbot County', 'Taliaferro County', 'Talladega County', 'Tallahatchie County', 'Tallapoosa County', 'Tama County', 'Taney County', 'Tangipahoa Parish', 'Taos County', 'Tarrant County', 'Tate County', 'Tattnall County', 'Taylor County', 'Tazewell County', 'Tehama County', 'Telfair County', 'Teller County', 'Tensas Parish', 'Terrebonne Parish', 'Terrell County', 
'Terry County', 'Teton County', 'Texas County', 'Thayer County', 'Thomas County', 'Throckmorton County', 'Thurston County', 'Tift County', 'Tillamook County', 'Tillman County', 'Tioga County', 'Tippah County', 'Tippecanoe County', 'Tipton County', 'Tishomingo County', 'Titus County', 'Todd County', 'Tolland County', 'Tom Green County', 'Tompkins County', 'Tooele County', 'Toole County', 'Toombs County', 'Torrance County', 'Towner County', 'Towns County', 'Traill County', 'Transylvania County', 'Traverse County', 'Travis County', 'Treasure County', 'Trego County', 'Trempealeau County', 'Treutlen County', 'Trigg County', 'Trimble County', 'Trinity County', 'Tripp County', 'Troup County', 'Trousdale County', 'Trumbull County', 'Tucker County', 'Tulare County', 'Tulsa County', 'Tunica County', 'Tuolumne County', 'Turner County', 'Tuscaloosa County', 'Tuscarawas County', 'Tuscola County', 'Twiggs County', 'Twin Falls County', 'Tyler County', 'Tyrrell County', 'Uinta County', 'Uintah County', 'Ulster County', 'Umatilla County', 'Unicoi County', 'Union County', 'Union Parish', 'Upshur County', 'Upson County', 'Upton County', 'Utah County', 'Uvalde County', 'Valdez-Cordova Census Area', 'Valencia County', 'Valley County', 'Val Verde County', 'Van Buren County', 'Vance County', 'Vanderburgh County', 'Van Wert County', 'Van Zandt County', 'Venango County', 'Ventura County', 'Vermilion County', 'Vermilion Parish', 'Vermillion County', 'Vernon County', 'Vernon Parish', 'Victoria County', 'Vigo County', 'Vilas County', 'Vinton County', 'Virginia Beach city', 'Volusia County', 'Wabasha County', 'Wabash County', 'Wabaunsee County', 'Wade Hampton Census Area', 'Wadena County', 'Wagoner County', 'Wahkiakum County', 'Wake County', 'Wakulla County', 'Waldo County', 'Walker County', 'Wallace County', 'Walla Walla County', 'Waller County', 'Wallowa County', 'Walsh County', 'Walthall County', 'Walton County', 'Walworth County', 'Wapello County', 'Ward County', 'Ware County', 'Warren 
County', 'Warrick County', 'Wasatch County', 'Wasco County', 'Waseca County', 'Washakie County', 'Washburn County', 'Washington County', 'Washington Parish', 'Washita County', 'Washoe County', 'Washtenaw County', 'Watauga County', 'Watonwan County', 'Waukesha County', 'Waupaca County', 'Waushara County', 'Wayne County', 'Waynesboro city', 'Weakley County', 'Webb County', 'Weber County', 'Webster County', 'Webster Parish', 'Weld County', 'Wells County', 'West Baton Rouge Parish', 'West Carroll Parish', 'Westchester County', 'West Feliciana Parish', 'Westmoreland County', 'Weston County', 'Wetzel County', 'Wexford County', 'Wharton County', 'Whatcom County', 'Wheatland County', 'Wheeler County', 'White County', 'White Pine County', 'Whiteside County', 'Whitfield County', 'Whitley County', 'Whitman County', 'Wibaux County', 'Wichita County', 'Wicomico County', 'Wilbarger County', 'Wilcox County', 'Wilkes County', 'Wilkin County', 'Wilkinson County', 'Willacy County', 'Will County', 'Williamsburg city', 'Williamsburg County', 'Williams County', 'Williamson County', 'Wilson County', 'Winchester city', 'Windham County', 'Windsor County', 'Winkler County', 'Winnebago County', 'Winneshiek County', 'Winn Parish', 'Winona County', 'Winston County', 'Wirt County', 'Wise County', 'Wolfe County', 'Woodbury County', 'Wood County', 'Woodford County', 'Woodruff County', 'Woods County', 'Woodson County', 'Woodward County', 'Worcester County', 'Worth County', 'Wright County', 'Wyandot County', 'Wyandotte County', 'Wyoming County', 'Wythe County', 'Yadkin County', 'Yakima County', 'Yakutat Borough', 'Yalobusha County', 'Yamhill County', 'Yancey County', 'Yankton County', 'Yates County', 'Yavapai County', 'Yazoo County', 'Yell County', 'Yellow Medicine County', 'Yellowstone County', 'Yoakum County', 'Yolo County', 'York County', 'Young County', 'Yuba County', 'Yukon-Koyukuk Census Area', 'Yuma County', 'Zapata County', 'Zavala County', 'Ziebach County', ) ZIP_CODES = ( "00601", 
"00608", "00626", "00649", "00659", "00668", "00669", "00716", "00725", "00728", "00741", "00749", "00750", "00762", "00764", "00769", "00791", "00794", "00804", "00816", "00836", "00844", "00862", "00868", "00875", "00896", "00897", "00902", "00909", "00919", "00923", "00925", "00944", "00969", "00999", "01008", "01011", "01018", "01019", "01028", "01051", "01092", "01099", "01114", "01119", "01125", "01134", "01140", "01173", "01184", "01186", "01187", "01189", "01213", "01218", "01231", "01234", "01235", "01262", "01281", "01302", "01325", "01332", "01344", "01366", "01452", "01454", "01494", "01499", "01513", "01519", "01565", "01574", "01675", "01683", "01687", "01704", "01740", "01743", "01747", "01750", "01760", "01801", "01804", "01806", "01809", "01816", "01818", "01819", "01829", "01878", "01889", "01945", "01987", "01998", "02003", "02009", "02021", "02022", "02023", "02053", "02121", "02129", "02145", "02190", "02239", "02249", "02251", "02254", "02269", "02275", "02285", "02292", "02311", "02315", "02332", "02349", "02357", "02364", "02381", "02382", "02392", "02397", "02421", "02424", "02438", "02444", "02452", "02454", "02504", "02533", "02534", "02544", "02552", "02633", "02637", "02644", "02646", "02663", "02682", "02705", "02706", "02723", "02750", "02805", "02810", "02819", "02824", "02844", "02876", "02884", "02891", "02893", "02897", "02924", "02966", "03044", "03055", "03059", "03077", "03103", "03132", "03162", "03165", "03221", "03229", "03286", "03298", "03324", "03329", "03331", "03338", "03408", "03412", "03414", "03419", "03434", "03484", "03498", "03524", "03535", "03603", "03627", "03675", "03709", "03788", "03835", "03887", "03889", "03892", "03898", "03901", "03942", "03960", "03972", "03973", "03994", "04005", "04025", "04045", "04051", "04081", "04111", "04124", "04130", "04192", "04195", "04204", "04228", "04283", "04304", "04388", "04390", "04404", "04424", "04445", "04468", "04541", "04551", "04557", "04558", "04584", "04627", 
"04645", "04662", "04674", "04690", "04692", "04698", "04704", "04707", "04720", "04736", "04737", "04745", "04753", "04767", "04769", "04774", "04788", "04790", "04812", "04840", "04844", "04853", "04867", "04873", "04889", "04924", "04938", "04955", "04969", "05008", "05022", "05029", "05062", "05064", "05066", "05089", "05125", "05136", "05146", "05154", "05192", "05193", "05202", "05214", "05242", "05286", "05294", "05348", "05349", "05352", "05369", "05443", "05451", "05454", "05519", "05575", "05580", "05581", "05593", "05618", "05638", "05679", "05685", "05714", "05715", "05719", "05724", "05783", "05841", "05858", "05871", "05872", "05881", "05894", "05954", "06002", "06013", "06020", "06023", "06036", "06058", "06064", "06065", "06069", "06091", "06095", "06101", "06108", "06121", "06166", "06176", "06191", "06205", "06221", "06223", "06225", "06269", "06273", "06276", "06281", "06309", "06317", "06352", "06399", "06404", "06417", "06419", "06443", "06458", "06467", "06488", "06494", "06503", "06545", "06557", "06565", "06566", "06575", "06580", "06585", "06587", "06600", "06645", "06660", "06674", "06698", "06715", "06734", "06792", "06796", "06835", "06845", "06877", "06884", "06891", "06894", "06940", "06943", "06964", "06978", "06986", "07031", "07057", "07110", "07117", "07134", "07149", "07157", "07175", "07194", "07207", "07214", "07222", "07253", "07268", "07296", "07297", "07302", "07319", "07347", "07349", "07387", "07388", "07396", "07398", "07420", "07467", "07471", "07493", "07496", "07509", "07538", "07555", "07564", "07571", "07584", "07597", "07621", "07622", "07657", "07666", "07775", "07872", "07892", "07917", "07934", "07954", "07969", "08011", "08041", "08084", "08129", "08134", "08137", "08138", "08209", "08244", "08246", "08283", "08287", "08302", "08313", "08345", "08346", "08348", "08352", "08383", "08438", "08496", "08532", "08536", "08540", "08541", "08551", "08595", "08601", "08603", "08614", "08641", "08648", "08654", "08659", 
"08675", "08680", "08687", "08695", "08719", "08740", "08764", "08765", "08767", "08809", "08822", "08852", "08880", "08883", "08937", "08939", "08942", "08954", "08970", "08971", "08975", "08989", "09030", "09034", "09082", "09096", "09119", "09125", "09129", "09148", "09156", "09167", "09179", "09189", "09193", "09205", "09217", "09254", "09321", "09322", "09354", "09367", "09373", "09384", "09385", "09411", "09428", "09431", "09452", "09454", "09477", "09483", "09502", "09509", "09515", "09525", "09530", "09532", "09534", "09548", "09550", "09566", "09568", "09571", "09583", "09584", "09594", "09603", "09614", "09634", "09637", "09651", "09672", "09681", "09689", "09701", "09712", "09737", "09745", "09765", "09793", "09818", "09831", "09837", "09840", "09843", "09858", "09875", "09903", "09905", "09906", "09910", "09918", "09920", "09943", "09951", "09958", "09966", "09971", "09981", "09987", "09991", "09998", "10001", "10008", "10026", "10059", "10068", "10069", "10116", "10125", "10141", "10150", "10156", "10162", "10164", "10169", "10191", "10194", "10216", "10236", "10262", "10268", "10275", "10296", "10302", "10307", "10309", "10314", "10317", "10319", "10325", "10336", "10344", "10369", "10382", "10399", "10408", "10411", "10414", "10418", "10419", "10444", "10451", "10492", "10499", "10519", "10525", "10534", "10540", "10573", "10584", "10586", "10587", "10589", "10613", "10618", "10631", "10634", "10635", "10636", "10662", "10663", "10679", "10689", "10725", "10732", "10744", "10757", "10765", "10766", "10844", "10852", "10854", "10870", "10894", "10899", "10913", "10918", "10919", "10965", "11075", "11083", "11087", "11140", "11143", "11147", "11160", "11176", "11178", "11185", "11187", "11201", "11204", "11206", "11209", "11216", "11218", "11229", "11233", "11289", "11294", "11338", "11364", "11387", "11398", "11400", "11403", "11409", "11421", "11422", "11423", "11425", "11479", "11521", "11523", "11525", "11529", "11545", "11564", "11581", "11620", 
"11621", "11639", "11654", "11669", "11675", "11686", "11692", "11711", "11715", "11721", "11732", "11749", "11757", "11767", "11777", "11781", "11797", "11818", "11821", "11824", "11838", "11844", "11852", "11854", "11877", "11883", "11888", "11900", "11904", "11933", "11934", "11944", "11952", "11985", "12022", "12033", "12037", "12044", "12046", "12063", "12105", "12106", "12123", "12124", "12145", "12150", "12185", "12193", "12205", "12210", "12219", "12224", "12244", "12276", "12284", "12291", "12293", "12297", "12324", "12352", "12366", "12400", "12455", "12459", "12477", "12503", "12522", "12532", "12562", "12565", "12621", "12629", "12645", "12648", "12668", "12686", "12698", "12724", "12738", "12808", "12812", "12814", "12819", "12834", "12891", "12898", "12924", "12935", "12952", "13003", "13027", "13075", "13188", "13191", "13235", "13252", "13298", "13301", "13318", "13342", "13360", "13368", "13372", "13373", "13394", "13396", "13405", "13425", "13445", "13447", "13451", "13481", "13511", "13524", "13592", "13595", "13604", "13622", "13628", "13683", "13764", "13788", "13790", "13804", "13824", "13830", "13868", "13883", "13951", "13957", "13984", "14027", "14062", "14072", "14074", "14092", "14098", "14104", "14107", "14120", "14136", "14145", "14153", "14167", "14169", "14174", "14188", "14190", "14212", "14219", "14240", "14244", "14253", "14273", "14289", "14324", "14338", "14360", "14369", "14408", "14462", "14464", "14466", "14489", "14525", "14536", "14546", "14554", "14561", "14593", "14602", "14614", "14629", "14642", "14686", "14694", "14749", "14752", "14843", "14851", "14854", "14919", "14975", "14980", "14993", "15018", "15038", "15079", "15085", "15114", "15115", "15119", "15124", "15143", "15167", "15241", "15258", "15272", "15281", "15290", "15294", "15319", "15354", "15386", "15402", "15413", "15423", "15464", "15465", "15495", "15501", "15508", "15521", "15532", "15566", "15576", "15590", "15591", "15605", "15615", "15621", "15623", 
"15625", "15669", "15673", "15676", "15681", "15695", "15709", "15717", "15743", "15752", "15781", "15799", "15802", "15804", "15817", "15819", "15858", "15867", "15903", "15922", "15933", "15945", "15965", "15980", "15985", "15990", "16000", "16045", "16053", "16060", "16074", "16075", "16088", "16098", "16115", "16134", "16149", "16192", "16196", "16240", "16245", "16277", "16284", "16291", "16340", "16343", "16364", "16378", "16386", "16454", "16457", "16475", "16489", "16497", "16510", "16534", "16539", "16549", "16557", "16575", "16594", "16614", "16622", "16653", "16668", "16693", "16696", "16697", "16719", "16747", "16787", "16788", "16798", "16801", "16820", "16867", "16871", "16893", "16894", "16896", "16909", "16913", "16938", "16944", "16955", "16971", "16984", "16997", "17018", "17021", "17023", "17039", "17057", "17066", "17172", "17219", "17237", "17272", "17292", "17317", "17319", "17333", "17334", "17354", "17411", "17441", "17529", "17537", "17538", "17564", "17595", "17609", "17644", "17682", "17683", "17687", "17702", "17743", "17745", "17746", "17752", "17783", "17820", "17838", "17896", "17912", "17932", "17934", "17936", "17940", "17941", "17951", "17991", "17995", "18001", "18003", "18014", "18018", "18041", "18048", "18054", "18057", "18059", "18075", "18087", "18095", "18119", "18124", "18140", "18164", "18165", "18167", "18205", "18209", "18222", "18223", "18239", "18249", "18252", "18274", "18280", "18339", "18354", "18370", "18371", "18375", "18391", "18434", "18482", "18519", "18525", "18529", "18567", "18578", "18579", "18605", "18617", "18721", "18722", "18754", "18767", "18773", "18784", "18785", "18811", "18828", "18862", "18877", "18883", "18899", "18909", "18924", "18930", "18948", "18971", "18988", "18994", "19003", "19037", "19089", "19101", "19120", "19145", "19162", "19165", "19188", "19193", "19230", "19231", "19236", "19237", "19275", "19303", "19305", "19306", "19310", "19317", "19343", "19351", "19387", "19391", "19398", 
"19431", "19452", "19454", "19471", "19501", "19515", "19532", "19534", "19550", "19568", "19583", "19584", "19614", "19634", "19651", "19672", "19681", "19698", "19785", "19818", "19840", "19843", "19858", "19865", "19920", "19938", "19981", "20001", "20008", "20026", "20029", "20049", "20059", "20068", "20069", "20116", "20125", "20128", "20141", "20149", "20150", "20156", "20162", "20163", "20164", "20169", "20191", "20193", "20194", "20204", "20216", "20224", "20238", "20244", "20245", "20262", "20268", "20275", "20278", "20296", "20297", "20302", "20307", "20309", "20311", "20314", "20319", "20320", "20323", "20325", "20330", "20344", "20369", "20381", "20383", "20399", "20407", "20408", "20411", "20414", "20418", "20419", "20428", "20444", "20445", "20451", "20468", "20481", "20485", "20492", "20499", "20514", "20519", "20522", "20525", "20526", "20534", "20540", "20573", "20584", "20586", "20587", "20589", "20613", "20618", "20631", "20634", "20635", "20647", "20663", "20681", "20689", "20702", "20725", "20728", "20732", "20744", "20765", "20766", "20817", "20844", "20851", "20852", "20854", "20858", "20875", "20894", "20899", "20913", "20918", "20919", "20945", "20965", "20974", "20982", "20986", "21075", "21083", "21087", "21103", "21104", "21140", "21143", "21147", "21150", "21160", "21176", "21184", "21185", "21187", "21201", "21204", "21206", "21209", "21216", "21218", "21219", "21229", "21278", "21286", "21289", "21294", "21309", "21320", "21327", "21338", "21364", "21373", "21379", "21387", "21398", "21400", "21403", "21409", "21422", "21423", "21425", "21439", "21495", "21513", "21521", "21523", "21525", "21529", "21545", "21549", "21564", "21581", "21606", "21620", "21621", "21639", "21649", "21654", "21666", "21669", "21675", "21685", "21686", "21692", "21711", "21715", "21721", "21732", "21733", "21749", "21753", "21757", "21764", "21767", "21777", "21781", "21782", "21792", "21795", "21802", "21811", "21818", "21821", "21824", "21830", "21838", 
"21844", "21851", "21852", "21854", "21869", "21877", "21883", "21900", "21904", "21933", "21934", "21944", "21952", "22000", "22005", "22024", "22033", "22046", "22063", "22082", "22095", "22105", "22106", "22123", "22139", "22145", "22150", "22193", "22205", "22210", "22219", "22224", "22244", "22255", "22258", "22276", "22281", "22284", "22291", "22293", "22297", "22324", "22352", "22366", "22403", "22444", "22455", "22459", "22477", "22503", "22522", "22532", "22539", "22562", "22565", "22621", "22629", "22633", "22645", "22648", "22668", "22686", "22698", "22704", "22724", "22729", "22738", "22758", "22807", "22808", "22812", "22814", "22819", "22834", "22884", "22891", "22894", "22898", "22917", "22924", "22935", "23003", "23027", "23054", "23075", "23094", "23175", "23191", "23199", "23235", "23252", "23289", "23292", "23298", "23301", "23308", "23342", "23360", "23372", "23373", "23394", "23396", "23405", "23425", "23445", "23451", "23481", "23511", "23521", "23524", "23540", "23592", "23595", "23604", "23607", "23617", "23622", "23628", "23629", "23647", "23664", "23683", "23704", "23725", "23764", "23788", "23790", "23804", "23811", "23824", "23834", "23845", "23852", "23868", "23879", "23883", "23898", "23918", "23941", "23951", "23957", "23958", "23984", "23989", "24004", "24027", "24029", "24039", "24045", "24062", "24074", "24090", "24092", "24098", "24104", "24106", "24107", "24120", "24136", "24137", "24145", "24146", "24153", "24167", "24169", "24174", "24188", "24190", "24212", "24219", "24240", "24244", "24253", "24254", "24255", "24267", "24273", "24289", "24312", "24324", "24338", "24355", "24356", "24360", "24369", "24372", "24387", "24408", "24422", "24429", "24435", "24462", "24464", "24466", "24484", "24489", "24525", "24536", "24546", "24554", "24561", "24569", "24592", "24593", "24597", "24602", "24614", "24615", "24642", "24686", "24689", "24694", "24742", "24744", "24748", "24749", "24752", "24760", "24769", "24820", "24843", "24851", 
"24854", "24919", "24925", "24966", "24975", "24980", "24981", "24993", "25010", "25018", "25038", "25079", "25085", "25097", "25114", "25115", "25119", "25124", "25143", "25183", "25236", "25241", "25247", "25258", "25271", "25272", "25274", "25281", "25290", "25294", "25319", "25325", "25354", "25386", "25391", "25392", "25402", "25413", "25423", "25436", "25442", "25460", "25464", "25465", "25495", "25497", "25501", "25508", "25521", "25532", "25547", "25553", "25566", "25576", "25580", "25590", "25591", "25597", "25605", "25621", "25623", "25625", "25642", "25669", "25676", "25681", "25695", "25709", "25717", "25743", "25752", "25798", "25799", "25802", "25804", "25817", "25819", "25858", "25867", "25888", "25894", "25903", "25933", "25945", "25957", "25965", "25966", "25975", "25980", "25985", "25987", "25990", "26000", "26033", "26045", "26053", "26060", "26074", "26075", "26097", "26098", "26115", "26134", "26156", "26192", "26196", "26197", "26235", "26237", "26239", "26240", "26245", "26261", "26277", "26282", "26284", "26291", "26294", "26340", "26343", "26364", "26378", "26386", "26389", "26397", "26431", "26454", "26457", "26489", "26510", "26517", "26534", "26539", "26549", "26557", "26575", "26594", "26607", "26614", "26622", "26653", "26668", "26693", "26696", "26697", "26699", "26702", "26704", "26719", "26747", "26749", "26787", "26788", "26796", "26798", "26820", "26830", "26867", "26871", "26888", "26893", "26896", "26909", "26913", "26933", "26938", "26944", "26955", "26964", "26971", "26984", "26992", "26995", "26997", "27021", "27022", "27023", "27039", "27042", "27057", "27066", "27098", "27113", "27119", "27149", "27175", "27207", "27219", "27253", "27272", "27280", "27292", "27317", "27319", "27322", "27334", "27377", "27389", "27395", "27411", "27422", "27441", "27484", "27529", "27534", "27537", "27538", "27563", "27564", "27570", "27595", "27609", "27644", "27658", "27671", "27682", "27683", "27687", "27702", "27713", "27743", "27745", 
"27746", "27748", "27750", "27752", "27759", "27783", "27820", "27835", "27838", "27841", "27845", "27854", "27880", "27896", "27903", "27932", "27934", "27936", "27940", "27941", "27995", "27999", "28003", "28014", "28018", "28041", "28048", "28054", "28059", "28075", "28080", "28087", "28095", "28109", "28119", "28140", "28164", "28167", "28209", "28221", "28222", "28223", "28249", "28252", "28257", "28274", "28280", "28283", "28336", "28337", "28339", "28342", "28354", "28370", "28371", "28375", "28389", "28434", "28466", "28482", "28519", "28522", "28525", "28529", "28548", "28556", "28567", "28579", "28587", "28593", "28605", "28637", "28654", "28674", "28721", "28722", "28754", "28767", "28773", "28784", "28811", "28828", "28862", "28877", "28883", "28903", "28909", "28924", "28930", "28948", "28971", "28988", "28994", "29003", "29037", "29044", "29089", "29094", "29101", "29105", "29112", "29120", "29145", "29162", "29165", "29188", "29193", "29230", "29231", "29236", "29237", "29252", "29275", "29303", "29305", "29306", "29309", "29310", "29317", "29318", "29343", "29352", "29358", "29387", "29391", "29398", "29431", "29452", "29454", "29501", "29502", "29515", "29525", "29530", "29532", "29534", "29550", "29553", "29566", "29583", "29584", "29614", "29634", "29651", "29672", "29681", "29698", "29726", "29737", "29766", "29778", "29785", "29793", "29818", "29829", "29840", "29843", "29858", "29865", "29920", "29966", "29981", "30001", "30008", "30026", "30029", "30049", "30056", "30059", "30069", "30116", "30125", "30128", "30141", "30150", "30156", "30162", "30164", "30169", "30191", "30194", "30204", "30216", "30224", "30238", "30245", "30254", "30262", "30268", "30275", "30278", "30296", "30302", "30307", "30309", "30311", "30317", "30319", "30323", "30330", "30336", "30344", "30348", "30369", "30382", "30383", "30386", "30399", "30407", "30408", "30411", "30418", "30419", "30428", "30445", "30451", "30468", "30481", "30492", "30499", "30514", "30519", 
"30522", "30525", "30526", "30534", "30540", "30573", "30580", "30584", "30586", "30587", "30589", "30618", "30631", "30634", "30635", "30636", "30662", "30679", "30702", "30725", "30728", "30732", "30744", "30757", "30765", "30766", "30817", "30844", "30852", "30854", "30870", "30875", "30894", "30899", "30913", "30918", "30919", "30945", "30965", "30967", "30974", "31075", "31083", "31087", "31103", "31104", "31135", "31140", "31143", "31147", "31150", "31160", "31176", "31185", "31187", "31201", "31204", "31206", "31209", "31216", "31218", "31219", "31229", "31233", "31255", "31278", "31286", "31289", "31294", "31309", "31338", "31379", "31383", "31387", "31398", "31400", "31403", "31409", "31421", "31422", "31423", "31439", "31453", "31495", "31513", "31521", "31523", "31525", "31529", "31545", "31549", "31564", "31590", "31606", "31620", "31639", "31649", "31654", "31666", "31669", "31675", "31685", "31686", "31692", "31711", "31715", "31721", "31732", "31733", "31749", "31753", "31757", "31764", "31767", "31771", "31777", "31781", "31792", "31795", "31802", "31818", "31821", "31824", "31830", "31838", "31844", "31851", "31852", "31854", "31869", "31877", "31883", "31900", "31904", "31933", "31934", "31944", "31952", "32000", "32022", "32024", "32033", "32037", "32063", "32082", "32095", "32105", "32106", "32123", "32124", "32139", "32145", "32150", "32160", "32185", "32193", "32205", "32210", "32219", "32224", "32230", "32244", "32255", "32258", "32276", "32281", "32284", "32291", "32293", "32297", "32324", "32352", "32366", "32389", "32400", "32444", "32455", "32459", "32469", "32477", "32503", "32522", "32532", "32539", "32562", "32565", "32621", "32629", "32633", "32645", "32668", "32686", "32704", "32711", "32724", "32729", "32738", "32807", "32808", "32812", "32814", "32819", "32834", "32884", "32894", "32898", "32924", "32935", "32952", "33003", "33027", "33075", "33094", "33109", "33175", "33191", "33199", "33235", "33252", "33287", "33298", "33301", 
"33308", "33318", "33342", "33360", "33368", "33372", "33373", "33394", "33396", "33405", "33425", "33445", "33447", "33451", "33481", "33511", "33521", "33524", "33530", "33540", "33592", "33595", "33604", "33607", "33617", "33622", "33628", "33647", "33664", "33683", "33704", "33764", "33788", "33790", "33804", "33824", "33834", "33845", "33852", "33868", "33883", "33918", "33941", "33949", "33951", "33957", "33959", "33971", "33984", "33989", "34004", "34027", "34039", "34045", "34062", "34072", "34074", "34088", "34090", "34092", "34098", "34104", "34107", "34120", "34136", "34137", "34145", "34146", "34153", "34167", "34169", "34174", "34188", "34190", "34212", "34219", "34240", "34244", "34253", "34254", "34255", "34267", "34273", "34286", "34289", "34317", "34324", "34338", "34355", "34356", "34360", "34369", "34372", "34408", "34422", "34429", "34435", "34462", "34464", "34466", "34477", "34484", "34489", "34525", "34536", "34546", "34554", "34561", "34569", "34592", "34593", "34597", "34602", "34614", "34615", "34629", "34642", "34683", "34686", "34694", "34748", "34749", "34752", "34760", "34769", "34820", "34843", "34851", "34854", "34919", "34921", "34966", "34975", "34980", "34981", "34993", "35018", "35038", "35079", "35085", "35097", "35114", "35115", "35119", "35124", "35143", "35183", "35236", "35241", "35247", "35258", "35271", "35272", "35274", "35281", "35290", "35294", "35319", "35325", "35350", "35354", "35386", "35391", "35392", "35402", "35413", "35423", "35436", "35442", "35458", "35460", "35464", "35465", "35469", "35495", "35497", "35501", "35508", "35521", "35532", "35547", "35566", "35576", "35580", "35591", "35597", "35605", "35615", "35623", "35669", "35673", "35676", "35681", "35709", "35717", "35720", "35752", "35781", "35798", "35799", "35802", "35804", "35817", "35819", "35843", "35858", "35867", "35894", "35903", "35922", "35945", "35957", "35965", "35966", "35975", "35980", "35985", "35987", "35990", "36000", "36045", "36053", 
"36060", "36074", "36088", "36098", "36115", "36134", "36144", "36149", "36156", "36192", "36196", "36197", "36237", "36239", "36245", "36261", "36277", "36282", "36284", "36291", "36294", "36340", "36343", "36364", "36378", "36386", "36397", "36431", "36444", "36454", "36457", "36497", "36510", "36517", "36534", "36549", "36550", "36552", "36557", "36575", "36594", "36607", "36614", "36622", "36653", "36668", "36693", "36696", "36697", "36699", "36702", "36704", "36719", "36747", "36749", "36787", "36788", "36796", "36798", "36801", "36820", "36830", "36853", "36867", "36871", "36888", "36893", "36894", "36896", "36909", "36938", "36944", "36955", "36964", "36971", "36984", "36992", "36995", "36997", "37018", "37021", "37022", "37023", "37039", "37042", "37057", "37066", "37098", "37113", "37119", "37137", "37149", "37172", "37175", "37207", "37219", "37253", "37272", "37292", "37317", "37319", "37333", "37334", "37354", "37369", "37377", "37389", "37395", "37397", "37411", "37422", "37441", "37484", "37529", "37534", "37537", "37538", "37563", "37564", "37570", "37609", "37644", "37646", "37658", "37671", "37682", "37683", "37687", "37702", "37713", "37743", "37745", "37746", "37748", "37750", "37751", "37752", "37759", "37783", "37820", "37835", "37838", "37841", "37845", "37854", "37896", "37903", "37912", "37932", "37934", "37936", "37940", "37941", "37991", "37995", "37999", "38003", "38014", "38018", "38041", "38048", "38054", "38057", "38059", "38075", "38087", "38095", "38109", "38119", "38124", "38137", "38140", "38163", "38164", "38165", "38167", "38205", "38209", "38222", "38223", "38239", "38249", "38252", "38274", "38280", "38336", "38337", "38339", "38342", "38354", "38370", "38371", "38375", "38389", "38391", "38423", "38430", "38434", "38466", "38482", "38496", "38525", "38529", "38548", "38556", "38567", "38579", "38582", "38587", "38589", "38595", "38605", "38617", "38637", "38674", "38721", "38722", "38754", "38767", "38773", "38784", "38785", 
"38811", "38828", "38862", "38877", "38883", "38899", "38903", "38909", "38930", "38948", "38971", "38994", "39003", "39037", "39044", "39089", "39101", "39112", "39120", "39145", "39162", "39165", "39193", "39230", "39231", "39237", "39252", "39275", "39303", "39305", "39306", "39309", "39310", "39317", "39318", "39343", "39351", "39352", "39358", "39376", "39387", "39391", "39398", "39431", "39452", "39454", "39501", "39515", "39525", "39530", "39532", "39534", "39550", "39553", "39566", "39568", "39583", "39584", "39611", "39614", "39634", "39642", "39651", "39672", "39681", "39698", "39737", "39740", "39750", "39766", "39778", "39785", "39787", "39818", "39829", "39840", "39843", "39858", "39920", "39938", "39959", "39966", "39971", "39981", "40001", "40008", "40026", "40029", "40056", "40059", "40068", "40069", "40116", "40125", "40141", "40149", "40150", "40156", "40162", "40163", "40164", "40169", "40191", "40193", "40194", "40204", "40216", "40238", "40244", "40245", "40262", "40268", "40275", "40278", "40296", "40302", "40309", "40311", "40317", "40319", "40320", "40323", "40330", "40336", "40344", "40369", "40381", "40382", "40383", "40386", "40399", "40407", "40408", "40411", "40414", "40418", "40419", "40444", "40451", "40468", "40485", "40492", "40499", "40514", "40519", "40525", "40526", "40534", "40540", "40573", "40580", "40584", "40586", "40587", "40589", "40613", "40618", "40631", "40634", "40635", "40636", "40647", "40663", "40679", "40681", "40689", "40702", "40725", "40732", "40744", "40757", "40765", "40766", "40844", "40851", "40852", "40854", "40858", "40870", "40875", "40894", "40899", "40913", "40918", "40919", "40945", "40965", "40967", "40974", "40982", "40986", "41075", "41083", "41087", "41135", "41140", "41143", "41147", "41150", "41160", "41176", "41178", "41184", "41185", "41187", "41201", "41204", "41206", "41209", "41216", "41218", "41219", "41229", "41233", "41254", "41255", "41286", "41289", "41294", "41309", "41320", "41345", 
"41364", "41373", "41379", "41383", "41387", "41398", "41400", "41403", "41409", "41422", "41423", "41425", "41439", "41453", "41479", "41495", "41513", "41521", "41525", "41529", "41545", "41549", "41564", "41590", "41606", "41620", "41621", "41632", "41639", "41649", "41651", "41654", "41666", "41669", "41675", "41685", "41686", "41692", "41711", "41715", "41721", "41732", "41733", "41749", "41753", "41757", "41764", "41771", "41792", "41795", "41802", "41811", "41818", "41821", "41824", "41830", "41838", "41844", "41851", "41852", "41854", "41888", "41900", "41904", "41933", "41934", "41944", "41952", "41985", "42000", "42022", "42033", "42037", "42044", "42063", "42082", "42095", "42105", "42106", "42123", "42145", "42150", "42160", "42185", "42193", "42205", "42210", "42219", "42224", "42230", "42244", "42255", "42276", "42284", "42291", "42293", "42297", "42324", "42366", "42389", "42400", "42444", "42455", "42459", "42477", "42503", "42522", "42539", "42562", "42565", "42621", "42629", "42633", "42645", "42648", "42668", "42686", "42704", "42711", "42724", "42731", "42735", "42738", "42807", "42808", "42812", "42814", "42819", "42884", "42891", "42898", "42924", "42935", "42952", "43003", "43027", "43054", "43075", "43094", "43100", "43109", "43188", "43191", "43199", "43235", "43252", "43287", "43292", "43298", "43301", "43308", "43318", "43342", "43360", "43368", "43372", "43373", "43394", "43396", "43405", "43425", "43445", "43447", "43451", "43481", "43511", "43524", "43540", "43592", "43595", "43604", "43607", "43617", "43622", "43628", "43647", "43664", "43683", "43704", "43725", "43764", "43788", "43790", "43804", "43811", "43824", "43830", "43834", "43845", "43852", "43868", "43879", "43883", "43898", "43918", "43951", "43957", "43958", "43959", "43971", "43984", "43989", "44004", "44027", "44029", "44039", "44045", "44062", "44074", "44090", "44092", "44098", "44104", "44107", "44120", "44136", "44137", "44145", "44153", "44167", "44169", "44174", 
"44188", "44190", "44212", "44240", "44244", "44253", "44273", "44286", "44289", "44312", "44317", "44324", "44338", "44360", "44369", "44408", "44422", "44429", "44435", "44462", "44464", "44466", "44477", "44484", "44489", "44525", "44536", "44546", "44554", "44561", "44569", "44592", "44593", "44602", "44614", "44615", "44629", "44642", "44686", "44689", "44694", "44742", "44744", "44749", "44752", "44760", "44769", "44820", "44843", "44854", "44919", "44925", "44966", "44975", "44980", "44993", "45010", "45018", "45038", "45079", "45085", "45097", "45114", "45115", "45119", "45124", "45167", "45183", "45236", "45247", "45258", "45271", "45272", "45274", "45281", "45290", "45294", "45319", "45325", "45354", "45386", "45391", "45392", "45402", "45413", "45420", "45423", "45436", "45442", "45460", "45464", "45465", "45469", "45491", "45495", "45497", "45501", "45508", "45521", "45532", "45547", "45553", "45566", "45576", "45580", "45590", "45591", "45605", "45615", "45621", "45623", "45642", "45669", "45673", "45676", "45681", "45695", "45709", "45717", "45720", "45743", "45752", "45781", "45798", "45799", "45804", "45817", "45819", "45858", "45867", "45888", "45894", "45903", "45922", "45945", "45957", "45965", "45975", "45980", "45985", "45990", "46000", "46045", "46060", "46074", "46098", "46115", "46134", "46144", "46149", "46192", "46196", "46197", "46235", "46237", "46239", "46240", "46245", "46261", "46277", "46282", "46284", "46291", "46340", "46343", "46364", "46378", "46386", "46389", "46397", "46431", "46444", "46454", "46457", "46475", "46489", "46497", "46510", "46534", "46539", "46549", "46550", "46552", "46557", "46575", "46594", "46607", "46614", "46622", "46653", "46668", "46693", "46696", "46697", "46702", "46704", "46708", "46719", "46747", "46749", "46787", "46788", "46796", "46798", "46801", "46820", "46830", "46867", "46871", "46888", "46893", "46894", "46896", "46909", "46933", "46938", "46944", "46955", "46964", "46971", "46984", "46995", 
"46997", "47018", "47021", "47022", "47023", "47039", "47042", "47057", "47066", "47098", "47113", "47119", "47137", "47149", "47172", "47175", "47207", "47237", "47253", "47272", "47280", "47292", "47317", "47319", "47322", "47334", "47369", "47377", "47389", "47395", "47411", "47422", "47441", "47484", "47529", "47534", "47537", "47538", "47564", "47570", "47595", "47609", "47644", "47671", "47682", "47683", "47687", "47702", "47721", "47743", "47745", "47746", "47748", "47750", "47751", "47752", "47759", "47783", "47820", "47835", "47838", "47841", "47845", "47854", "47880", "47896", "47903", "47932", "47934", "47936", "47940", "47941", "47951", "47995", "47997", "47999", "48003", "48014", "48018", "48041", "48048", "48054", "48057", "48059", "48075", "48080", "48087", "48095", "48119", "48124", "48137", "48140", "48163", "48164", "48167", "48205", "48209", "48221", "48222", "48223", "48239", "48252", "48257", "48274", "48280", "48283", "48336", "48337", "48339", "48342", "48354", "48370", "48371", "48375", "48389", "48423", "48430", "48434", "48482", "48519", "48522", "48525", "48529", "48548", "48567", "48579", "48582", "48589", "48593", "48595", "48605", "48617", "48637", "48654", "48674", "48721", "48722", "48754", "48767", "48773", "48784", "48785", "48811", "48828", "48862", "48877", "48883", "48899", "48903", "48909", "48924", "48930", "48935", "48948", "48971", "48988", "48994", "49003", "49037", "49089", "49094", "49101", "49105", "49112", "49120", "49145", "49162", "49165", "49188", "49193", "49230", "49231", "49237", "49252", "49275", "49303", "49305", "49306", "49309", "49310", "49343", "49351", "49352", "49358", "49376", "49387", "49391", "49398", "49431", "49452", "49454", "49471", "49501", "49502", "49515", "49525", "49530", "49532", "49534", "49550", "49553", "49566", "49568", "49583", "49584", "49611", "49614", "49634", "49642", "49651", "49672", "49681", "49698", "49737", "49740", "49750", "49766", "49778", "49785", "49793", "49818", "49829", 
"49840", "49843", "49858", "49865", "49920", "49938", "49959", "49966", "49971", "49981", "50001", "50008", "50026", "50029", "50049", "50056", "50059", "50068", "50069", "50116", "50125", "50141", "50149", "50150", "50156", "50162", "50163", "50164", "50169", "50191", "50194", "50204", "50216", "50236", "50238", "50244", "50245", "50254", "50262", "50268", "50275", "50296", "50297", "50302", "50307", "50309", "50311", "50314", "50317", "50319", "50320", "50325", "50330", "50336", "50344", "50348", "50369", "50382", "50386", "50399", "50408", "50411", "50414", "50418", "50419", "50428", "50445", "50451", "50468", "50485", "50492", "50499", "50514", "50519", "50525", "50526", "50534", "50540", "50573", "50580", "50587", "50589", "50613", "50618", "50631", "50634", "50635", "50636", "50663", "50679", "50702", "50725", "50728", "50732", "50744", "50757", "50765", "50766", "50817", "50844", "50851", "50852", "50854", "50870", "50894", "50899", "50913", "50919", "50945", "50965", "50967", "50974", "50982", "51075", "51083", "51087", "51104", "51135", "51140", "51143", "51147", "51150", "51160", "51176", "51178", "51184", "51185", "51187", "51201", "51204", "51206", "51209", "51216", "51218", "51219", "51229", "51233", "51254", "51255", "51278", "51286", "51289", "51320", "51327", "51338", "51345", "51355", "51364", "51379", "51387", "51398", "51400", "51403", "51409", "51421", "51423", "51425", "51479", "51513", "51521", "51525", "51529", "51545", "51549", "51581", "51590", "51606", "51620", "51621", "51639", "51649", "51651", "51654", "51669", "51675", "51686", "51692", "51711", "51715", "51721", "51732", "51733", "51749", "51757", "51764", "51771", "51777", "51781", "51782", "51795", "51797", "51802", "51818", "51821", "51824", "51830", "51838", "51844", "51851", "51852", "51854", "51869", "51883", "51900", "51904", "51933", "51934", "51944", "51952", "51985", "52005", "52022", "52024", "52033", "52044", "52046", "52082", "52095", "52105", "52106", "52123", "52124", 
"52139", "52145", "52150", "52160", "52185", "52193", "52205", "52210", "52219", "52224", "52230", "52244", "52255", "52276", "52281", "52284", "52291", "52293", "52297", "52324", "52352", "52366", "52389", "52400", "52403", "52444", "52455", "52459", "52469", "52477", "52503", "52522", "52532", "52539", "52562", "52565", "52621", "52629", "52645", "52648", "52668", "52686", "52698", "52704", "52711", "52724", "52729", "52731", "52735", "52738", "52758", "52808", "52812", "52814", "52819", "52834", "52884", "52898", "52917", "52924", "52935", "53003", "53027", "53054", "53075", "53094", "53100", "53109", "53175", "53188", "53191", "53199", "53235", "53252", "53287", "53289", "53292", "53298", "53301", "53308", "53318", "53342", "53360", "53368", "53372", "53373", "53394", "53405", "53425", "53445", "53447", "53451", "53481", "53511", "53521", "53524", "53530", "53540", "53592", "53595", "53604", "53607", "53617", "53622", "53628", "53629", "53647", "53664", "53683", "53704", "53764", "53788", "53790", "53804", "53811", "53824", "53830", "53834", "53845", "53868", "53879", "53918", "53941", "53949", "53951", "53957", "53958", "53959", "53971", "53984", "53989", "54027", "54029", "54039", "54045", "54062", "54072", "54074", "54088", "54090", "54092", "54098", "54104", "54106", "54107", "54120", "54136", "54145", "54146", "54153", "54167", "54169", "54174", "54188", "54190", "54212", "54219", "54240", "54244", "54253", "54254", "54255", "54267", "54273", "54286", "54289", "54312", "54324", "54338", "54355", "54356", "54360", "54369", "54372", "54387", "54408", "54422", "54429", "54435", "54462", "54464", "54466", "54484", "54489", "54525", "54536", "54546", "54554", "54561", "54569", "54592", "54593", "54597", "54602", "54614", "54629", "54642", "54683", "54686", "54689", "54694", "54742", "54744", "54748", "54749", "54752", "54760", "54769", "54820", "54843", "54851", "54854", "54919", "54925", "54966", "54975", "54980", "54981", "54993", "55018", "55038", "55079", 
"55085", "55097", "55114", "55115", "55119", "55124", "55143", "55167", "55183", "55236", "55247", "55258", "55272", "55274", "55281", "55290", "55294", "55319", "55325", "55350", "55354", "55386", "55391", "55392", "55402", "55413", "55420", "55423", "55436", "55442", "55458", "55460", "55464", "55465", "55469", "55491", "55495", "55501", "55508", "55521", "55532", "55547", "55553", "55566", "55576", "55580", "55591", "55605", "55615", "55621", "55623", "55625", "55642", "55669", "55676", "55681", "55695", "55709", "55717", "55743", "55752", "55781", "55798", "55799", "55802", "55804", "55817", "55819", "55843", "55858", "55867", "55888", "55894", "55903", "55922", "55933", "55945", "55957", "55965", "55966", "55975", "55980", "55985", "55987", "55990", "56000", "56033", "56045", "56053", "56060", "56074", "56075", "56088", "56097", "56098", "56115", "56134", "56144", "56156", "56192", "56196", "56235", "56237", "56239", "56240", "56245", "56261", "56277", "56282", "56284", "56291", "56294", "56340", "56343", "56364", "56386", "56389", "56397", "56431", "56454", "56457", "56475", "56497", "56510", "56517", "56534", "56539", "56549", "56550", "56552", "56557", "56575", "56594", "56607", "56614", "56622", "56653", "56668", "56693", "56696", "56697", "56699", "56702", "56704", "56708", "56719", "56723", "56747", "56787", "56788", "56796", "56798", "56820", "56830", "56853", "56867", "56871", "56888", "56893", "56896", "56909", "56913", "56933", "56938", "56944", "56955", "56971", "56984", "56992", "56995", "56997", "57018", "57021", "57022", "57023", "57042", "57057", "57066", "57113", "57137", "57149", "57172", "57175", "57207", "57237", "57253", "57272", "57280", "57292", "57317", "57319", "57322", "57333", "57334", "57354", "57377", "57389", "57395", "57397", "57411", "57422", "57441", "57529", "57534", "57537", "57538", "57564", "57570", "57595", "57609", "57644", "57646", "57658", "57671", "57682", "57683", "57687", "57702", "57713", "57721", "57743", "57745", 
"57746", "57748", "57751", "57752", "57759", "57783", "57820", "57835", "57838", "57841", "57845", "57880", "57896", "57903", "57932", "57934", "57936", "57940", "57941", "57951", "57991", "57995", "57999", "58001", "58003", "58014", "58041", "58048", "58054", "58057", "58059", "58075", "58087", "58095", "58109", "58119", "58124", "58137", "58140", "58163", "58164", "58165", "58167", "58205", "58209", "58222", "58223", "58249", "58252", "58257", "58274", "58280", "58283", "58336", "58337", "58339", "58342", "58354", "58370", "58371", "58375", "58389", "58391", "58423", "58430", "58434", "58466", "58482", "58496", "58522", "58525", "58529", "58548", "58556", "58567", "58578", "58579", "58582", "58587", "58589", "58593", "58595", "58605", "58617", "58637", "58654", "58721", "58722", "58754", "58767", "58773", "58784", "58785", "58811", "58828", "58862", "58877", "58883", "58899", "58903", "58909", "58924", "58930", "58935", "58948", "58971", "58988", "58994", "59003", "59037", "59044", "59089", "59094", "59101", "59105", "59120", "59145", "59162", "59165", "59188", "59193", "59230", "59231", "59236", "59237", "59252", "59275", "59303", "59305", "59306", "59309", "59310", "59318", "59343", "59351", "59352", "59376", "59387", "59391", "59398", "59431", "59452", "59454", "59471", "59515", "59530", "59532", "59534", "59550", "59553", "59566", "59568", "59583", "59584", "59611", "59614", "59634", "59642", "59651", "59672", "59681", "59698", "59726", "59737", "59740", "59750", "59787", "59793", "59818", "59840", "59843", "59858", "59865", "59920", "59938", "59959", "59966", "59971", "59981", "60001", "60008", "60026", "60029", "60049", "60059", "60068", "60069", "60116", "60125", "60141", "60149", "60150", "60156", "60162", "60163", "60164", "60169", "60191", "60194", "60216", "60224", "60236", "60238", "60244", "60245", "60254", "60262", "60268", "60275", "60296", "60297", "60302", "60309", "60311", "60314", "60317", "60319", "60323", "60325", "60330", "60336", "60344", 
"60348", "60369", "60381", "60382", "60383", "60399", "60407", "60408", "60411", "60418", "60419", "60428", "60445", "60451", "60468", "60481", "60485", "60492", "60499", "60514", "60519", "60522", "60525", "60526", "60534", "60540", "60573", "60580", "60586", "60587", "60589", "60618", "60631", "60634", "60635", "60636", "60647", "60662", "60663", "60679", "60681", "60689", "60702", "60725", "60728", "60732", "60744", "60757", "60765", "60766", "60817", "60851", "60852", "60854", "60858", "60870", "60875", "60894", "60899", "60913", "60918", "60919", "60945", "60965", "60982", "60986", "61075", "61083", "61087", "61103", "61104", "61135", "61140", "61143", "61147", "61150", "61160", "61176", "61178", "61184", "61185", "61187", "61201", "61204", "61206", "61209", "61216", "61218", "61219", "61229", "61233", "61254", "61255", "61278", "61286", "61289", "61320", "61327", "61338", "61345", "61355", "61364", "61373", "61379", "61383", "61387", "61398", "61400", "61403", "61409", "61421", "61423", "61425", "61453", "61479", "61495", "61521", "61523", "61525", "61529", "61545", "61549", "61564", "61581", "61590", "61606", "61620", "61621", "61632", "61639", "61649", "61651", "61654", "61666", "61669", "61675", "61685", "61686", "61692", "61711", "61715", "61732", "61733", "61749", "61757", "61764", "61767", "61771", "61777", "61781", "61782", "61795", "61797", "61818", "61821", "61824", "61830", "61838", "61844", "61852", "61854", "61869", "61877", "61900", "61904", "61933", "61934", "61944", "61952", "62005", "62022", "62024", "62033", "62037", "62044", "62046", "62063", "62082", "62095", "62105", "62106", "62123", "62139", "62145", "62150", "62160", "62185", "62193", "62205", "62210", "62219", "62224", "62230", "62244", "62255", "62258", "62276", "62281", "62284", "62291", "62293", "62297", "62324", "62366", "62389", "62403", "62444", "62455", "62459", "62469", "62477", "62503", "62522", "62532", "62539", "62562", "62565", "62621", "62629", "62633", "62645", "62668", 
"62686", "62698", "62711", "62724", "62729", "62731", "62735", "62738", "62758", "62807", "62808", "62812", "62814", "62819", "62834", "62884", "62891", "62894", "62898", "62917", "62924", "62935", "62952", "63003", "63027", "63054", "63075", "63100", "63109", "63175", "63188", "63191", "63199", "63235", "63289", "63292", "63298", "63301", "63318", "63342", "63360", "63372", "63373", "63394", "63396", "63405", "63425", "63445", "63447", "63451", "63481", "63511", "63521", "63524", "63540", "63592", "63595", "63604", "63607", "63617", "63622", "63628", "63629", "63647", "63664", "63683", "63764", "63788", "63790", "63804", "63824", "63830", "63845", "63868", "63879", "63883", "63918", "63941", "63949", "63951", "63957", "63958", "63959", "63984", "64004", "64027", "64029", "64039", "64062", "64072", "64074", "64088", "64090", "64092", "64098", "64104", "64107", "64120", "64136", "64137", "64145", "64146", "64153", "64167", "64169", "64174", "64188", "64190", "64212", "64219", "64240", "64244", "64253", "64254", "64255", "64267", "64273", "64286", "64289", "64312", "64324", "64338", "64355", "64356", "64360", "64369", "64372", "64408", "64422", "64429", "64435", "64462", "64464", "64466", "64477", "64484", "64489", "64525", "64536", "64546", "64554", "64561", "64569", "64592", "64593", "64597", "64602", "64614", "64615", "64629", "64642", "64686", "64689", "64694", "64742", "64744", "64748", "64749", "64752", "64760", "64820", "64843", "64851", "64854", "64919", "64925", "64975", "64980", "64993", "65010", "65018", "65038", "65079", "65085", "65097", "65114", "65115", "65119", "65124", "65167", "65236", "65241", "65247", "65258", "65271", "65272", "65274", "65281", "65294", "65319", "65325", "65354", "65386", "65392", "65413", "65420", "65423", "65436", "65442", "65458", "65460", "65464", "65465", "65491", "65495", "65497", "65501", "65508", "65521", "65532", "65553", "65566", "65576", "65580", "65590", "65591", "65597", "65605", "65615", "65621", "65623", "65642", 
"65669", "65673", "65676", "65681", "65695", "65709", "65717", "65720", "65752", "65781", "65798", "65799", "65802", "65804", "65817", "65819", "65843", "65858", "65867", "65888", "65903", "65933", "65945", "65957", "65965", "65966", "65980", "65985", "65987", "65990", "66000", "66033", "66045", "66053", "66060", "66074", "66075", "66097", "66098", "66115", "66134", "66144", "66149", "66156", "66192", "66196", "66197", "66235", "66237", "66239", "66240", "66245", "66261", "66277", "66282", "66284", "66291", "66294", "66340", "66343", "66364", "66378", "66386", "66397", "66444", "66454", "66457", "66475", "66489", "66510", "66517", "66534", "66539", "66549", "66550", "66557", "66575", "66594", "66607", "66614", "66622", "66653", "66668", "66693", "66696", "66697", "66699", "66702", "66708", "66719", "66723", "66747", "66749", "66787", "66788", "66796", "66798", "66801", "66820", "66830", "66867", "66871", "66888", "66893", "66894", "66896", "66909", "66913", "66933", "66938", "66944", "66955", "66964", "66971", "66984", "66992", "66995", "66997", "67018", "67021", "67022", "67023", "67039", "67042", "67057", "67066", "67098", "67137", "67149", "67172", "67175", "67219", "67237", "67272", "67280", "67292", "67317", "67319", "67322", "67333", "67334", "67354", "67369", "67377", "67389", "67395", "67411", "67422", "67441", "67484", "67529", "67537", "67538", "67563", "67570", "67609", "67644", "67646", "67658", "67671", "67682", "67683", "67687", "67702", "67713", "67721", "67743", "67745", "67746", "67748", "67751", "67752", "67759", "67783", "67820", "67838", "67841", "67845", "67854", "67880", "67896", "67903", "67932", "67936", "67940", "67941", "67951", "67995", "67997", "67999", "68001", "68003", "68014", "68018", "68041", "68048", "68054", "68057", "68059", "68075", "68080", "68087", "68095", "68109", "68119", "68124", "68137", "68140", "68164", "68165", "68167", "68209", "68221", "68222", "68223", "68239", "68249", "68252", "68257", "68274", "68280", "68283", 
"68336", "68337", "68339", "68342", "68354", "68370", "68371", "68375", "68389", "68430", "68434", "68466", "68482", "68496", "68519", "68522", "68525", "68529", "68556", "68567", "68578", "68579", "68582", "68587", "68589", "68593", "68595", "68605", "68654", "68674", "68721", "68722", "68754", "68767", "68773", "68784", "68785", "68811", "68828", "68862", "68877", "68883", "68899", "68903", "68909", "68924", "68930", "68935", "68948", "68971", "68988", "68994", "69003", "69037", "69044", "69089", "69094", "69101", "69112", "69120", "69145", "69162", "69188", "69193", "69230", "69231", "69236", "69237", "69252", "69275", "69303", "69305", "69306", "69309", "69310", "69318", "69343", "69351", "69352", "69376", "69387", "69391", "69398", "69431", "69452", "69454", "69501", "69502", "69515", "69532", "69534", "69550", "69553", "69566", "69568", "69583", "69584", "69611", "69614", "69634", "69642", "69651", "69672", "69681", "69698", "69726", "69737", "69740", "69750", "69766", "69785", "69793", "69818", "69829", "69840", "69843", "69858", "69865", "69920", "69938", "69959", "69966", "69971", "69981", "70001", "70008", "70026", "70029", "70056", "70059", "70069", "70116", "70125", "70141", "70149", "70150", "70156", "70162", "70163", "70164", "70169", "70191", "70193", "70194", "70204", "70216", "70224", "70236", "70238", "70244", "70245", "70254", "70262", "70268", "70275", "70278", "70296", "70297", "70302", "70307", "70309", "70311", "70317", "70319", "70320", "70323", "70325", "70336", "70344", "70348", "70369", "70383", "70386", "70399", "70407", "70408", "70411", "70414", "70418", "70419", "70428", "70451", "70481", "70485", "70492", "70499", "70514", "70519", "70525", "70534", "70540", "70573", "70580", "70584", "70586", "70587", "70589", "70613", "70618", "70631", "70634", "70635", "70636", "70662", "70663", "70679", "70681", "70689", "70702", "70725", "70732", "70744", "70757", "70765", "70766", "70817", "70844", "70851", "70852", "70854", "70870", "70875", 
"70894", "70899", "70913", "70918", "70919", "70965", "70967", "70974", "70982", "70986", "71075", "71083", "71087", "71103", "71104", "71140", "71143", "71147", "71160", "71176", "71178", "71184", "71185", "71187", "71201", "71204", "71206", "71209", "71216", "71218", "71229", "71254", "71278", "71286", "71289", "71309", "71327", "71345", "71355", "71364", "71373", "71379", "71383", "71387", "71398", "71400", "71409", "71421", "71423", "71425", "71453", "71479", "71495", "71513", "71521", "71523", "71529", "71545", "71549", "71564", "71581", "71590", "71606", "71620", "71621", "71632", "71639", "71649", "71651", "71654", "71666", "71669", "71675", "71685", "71686", "71692", "71711", "71715", "71721", "71732", "71733", "71749", "71753", "71757", "71764", "71767", "71771", "71777", "71781", "71782", "71795", "71797", "71811", "71821", "71824", "71830", "71838", "71844", "71851", "71852", "71854", "71869", "71877", "71883", "71900", "71904", "71933", "71934", "71944", "71952", "71985", "72005", "72022", "72024", "72033", "72044", "72046", "72063", "72082", "72095", "72105", "72106", "72123", "72124", "72139", "72145", "72150", "72160", "72185", "72193", "72205", "72210", "72219", "72224", "72244", "72258", "72276", "72284", "72291", "72293", "72297", "72324", "72352", "72366", "72389", "72400", "72403", "72444", "72455", "72459", "72469", "72477", "72503", "72522", "72539", "72562", "72565", "72621", "72629", "72645", "72668", "72686", "72704", "72711", "72724", "72738", "72758", "72807", "72808", "72812", "72814", "72819", "72834", "72884", "72891", "72894", "72898", "72924", "72935", "72952", "73003", "73027", "73075", "73094", "73100", "73109", "73175", "73188", "73191", "73199", "73235", "73252", "73289", "73298", "73301", "73308", "73318", "73342", "73360", "73368", "73372", "73373", "73394", "73396", "73405", "73425", "73445", "73451", "73481", "73511", "73521", "73524", "73530", "73540", "73592", "73595", "73604", "73607", "73617", "73622", "73628", "73629", 
"73647", "73664", "73683", "73704", "73725", "73764", "73788", "73790", "73804", "73811", "73824", "73830", "73834", "73845", "73868", "73879", "73883", "73898", "73941", "73949", "73951", "73957", "73958", "73959", "73984", "74004", "74027", "74029", "74039", "74045", "74062", "74072", "74074", "74090", "74092", "74098", "74104", "74106", "74107", "74120", "74136", "74145", "74146", "74153", "74167", "74169", "74174", "74188", "74190", "74212", "74219", "74240", "74244", "74253", "74254", "74255", "74267", "74273", "74286", "74289", "74312", "74317", "74324", "74338", "74356", "74360", "74369", "74372", "74387", "74408", "74422", "74462", "74464", "74466", "74477", "74484", "74489", "74525", "74536", "74546", "74554", "74561", "74593", "74597", "74602", "74614", "74615", "74629", "74642", "74683", "74686", "74689", "74694", "74742", "74744", "74748", "74749", "74752", "74760", "74769", "74820", "74843", "74851", "74854", "74919", "74921", "74925", "74975", "74980", "74981", "74993", "75010", "75018", "75038", "75079", "75085", "75114", "75115", "75119", "75124", "75143", "75167", "75183", "75236", "75241", "75247", "75258", "75271", "75272", "75274", "75281", "75290", "75294", "75319", "75325", "75354", "75386", "75391", "75392", "75402", "75413", "75420", "75423", "75436", "75442", "75458", "75464", "75465", "75469", "75491", "75495", "75497", "75508", "75521", "75547", "75566", "75576", "75580", "75590", "75591", "75597", "75605", "75615", "75621", "75623", "75625", "75642", "75669", "75673", "75681", "75695", "75709", "75717", "75720", "75743", "75752", "75781", "75798", "75799", "75802", "75804", "75817", "75819", "75843", "75858", "75867", "75888", "75903", "75922", "75933", "75945", "75957", "75965", "75975", "75980", "75985", "75987", "76000", "76033", "76045", "76060", "76074", "76088", "76097", "76098", "76115", "76134", "76149", "76156", "76192", "76196", "76197", "76237", "76239", "76245", "76261", "76277", "76284", "76291", "76294", "76340", "76343", 
"76364", "76378", "76386", "76389", "76397", "76431", "76444", "76454", "76457", "76475", "76489", "76497", "76510", "76517", "76534", "76539", "76549", "76550", "76552", "76557", "76575", "76594", "76607", "76614", "76622", "76653", "76668", "76693", "76696", "76697", "76699", "76702", "76704", "76719", "76723", "76747", "76749", "76787", "76788", "76796", "76798", "76801", "76820", "76830", "76853", "76867", "76871", "76888", "76893", "76894", "76896", "76909", "76913", "76933", "76938", "76944", "76955", "76964", "76971", "76984", "76992", "76995", "76997", "77018", "77021", "77023", "77039", "77057", "77066", "77098", "77113", "77119", "77137", "77149", "77175", "77207", "77219", "77237", "77253", "77272", "77280", "77292", "77317", "77319", "77322", "77333", "77334", "77354", "77369", "77377", "77389", "77411", "77441", "77484", "77529", "77537", "77538", "77563", "77564", "77570", "77595", "77609", "77644", "77646", "77658", "77683", "77687", "77702", "77713", "77721", "77743", "77745", "77746", "77748", "77750", "77751", "77752", "77759", "77783", "77820", "77838", "77845", "77854", "77880", "77896", "77912", "77932", "77934", "77936", "77940", "77941", "77951", "77991", "77995", "77997", "77999", "78001", "78003", "78014", "78018", "78041", "78048", "78054", "78057", "78059", "78075", "78080", "78087", "78095", "78109", "78119", "78137", "78140", "78163", "78164", "78167", "78209", "78221", "78222", "78223", "78249", "78252", "78257", "78274", "78280", "78283", "78336", "78337", "78339", "78342", "78354", "78370", "78371", "78375", "78389", "78423", "78434", "78482", "78496", "78519", "78522", "78525", "78529", "78556", "78567", "78578", "78579", "78582", "78587", "78589", "78593", "78595", "78605", "78617", "78637", "78674", "78721", "78722", "78754", "78767", "78773", "78784", "78785", "78811", "78828", "78862", "78877", "78883", "78899", "78903", "78909", "78924", "78930", "78935", "78948", "78971", "78988", "78994", "79003", "79037", "79044", "79089", 
"79094", "79101", "79105", "79112", "79120", "79145", "79165", "79188", "79193", "79230", "79231", "79236", "79237", "79275", "79303", "79305", "79306", "79310", "79318", "79343", "79351", "79352", "79358", "79376", "79387", "79391", "79398", "79431", "79452", "79454", "79471", "79501", "79515", "79525", "79530", "79532", "79534", "79550", "79553", "79566", "79568", "79583", "79584", "79611", "79614", "79634", "79642", "79651", "79672", "79681", "79726", "79737", "79740", "79766", "79785", "79787", "79793", "79818", "79829", "79840", "79843", "79858", "79865", "79920", "79938", "79959", "79966", "79971", "79981", "80001", "80008", "80026", "80029", "80049", "80059", "80068", "80069", "80116", "80125", "80141", "80149", "80150", "80156", "80162", "80164", "80169", "80191", "80194", "80204", "80216", "80236", "80244", "80245", "80262", "80268", "80275", "80278", "80296", "80302", "80307", "80309", "80311", "80314", "80317", "80319", "80320", "80330", "80344", "80348", "80369", "80383", "80386", "80399", "80408", "80411", "80414", "80418", "80419", "80428", "80451", "80468", "80481", "80485", "80492", "80499", "80519", "80525", "80526", "80534", "80540", "80573", "80580", "80587", "80589", "80618", "80631", "80634", "80635", "80647", "80679", "80681", "80702", "80725", "80732", "80744", "80765", "80766", "80817", "80852", "80854", "80858", "80875", "80894", "80899", "80913", "80919", "80945", "80965", "80974", "80986", "81075", "81083", "81087", "81103", "81104", "81140", "81143", "81147", "81150", "81160", "81176", "81178", "81184", "81185", "81201", "81204", "81206", "81209", "81216", "81218", "81219", "81229", "81254", "81255", "81286", "81289", "81309", "81320", "81338", "81345", "81355", "81383", "81387", "81398", "81400", "81409", "81422", "81423", "81425", "81453", "81479", "81513", "81521", "81525", "81529", "81545", "81564", "81620", "81621", "81639", "81649", "81654", "81669", "81675", "81686", "81692", "81711", "81715", "81732", "81733", "81749", "81753", 
"81757", "81764", "81792", "81795", "81797", "81811", "81818", "81821", "81824", "81830", "81838", "81844", "81851", "81852", "81854", "81883", "81888", "81900", "81904", "81933", "81934", "81944", "81952", "82022", "82024", "82033", "82037", "82063", "82082", "82095", "82105", "82106", "82123", "82124", "82139", "82150", "82193", "82205", "82219", "82224", "82230", "82244", "82276", "82284", "82293", "82297", "82324", "82352", "82366", "82389", "82455", "82459", "82477", "82503", "82522", "82539", "82562", "82565", "82621", "82629", "82648", "82668", "82686", "82698", "82711", "82724", "82729", "82731", "82735", "82738", "82758", "82807", "82808", "82812", "82814", "82819", "82834", "82884", "82894", "82898", "82917", "82924", "82935", "83003", "83027", "83075", "83094", "83100", "83175", "83188", "83199", "83235", "83298", "83301", "83342", "83360", "83368", "83372", "83373", "83394", "83396", "83405", "83425", "83445", "83447", "83451", "83481", "83511", "83521", "83524", "83530", "83592", "83595", "83604", "83628", "83683", "83764", "83788", "83790", "83804", "83824", "83830", "83845", "83852", "83868", "83879", "83883", "83898", "83918", "83949", "83951", "83957", "83958", "83971", "83984", "83989", "84027", "84029", "84039", "84045", "84062", "84072", "84074", "84088", "84090", "84092", "84098", "84104", "84107", "84120", "84136", "84145", "84146", "84153", "84167", "84169", "84174", "84188", "84190", "84212", "84219", "84240", "84244", "84253", "84254", "84273", "84289", "84312", "84317", "84324", "84338", "84355", "84356", "84360", "84369", "84372", "84387", "84408", "84422", "84462", "84464", "84466", "84489", "84525", "84536", "84546", "84554", "84561", "84569", "84592", "84593", "84602", "84614", "84615", "84642", "84683", "84686", "84694", "84742", "84744", "84748", "84749", "84752", "84760", "84769", "84820", "84843", "84851", "84854", "84919", "84921", "84925", "84966", "84975", "84980", "84981", "85018", "85038", "85079", "85097", "85114", "85115", 
"85119", "85124", "85143", "85247", "85258", "85272", "85274", "85281", "85290", "85294", "85325", "85350", "85354", "85386", "85392", "85402", "85413", "85420", "85423", "85458", "85460", "85464", "85465", "85469", "85491", "85495", "85497", "85501", "85508", "85521", "85547", "85553", "85566", "85576", "85580", "85590", "85591", "85597", "85605", "85621", "85623", "85642", "85669", "85681", "85695", "85709", "85717", "85720", "85743", "85752", "85781", "85798", "85799", "85802", "85804", "85817", "85819", "85858", "85867", "85903", "85945", "85957", "85965", "85980", "85985", "85987", "85990", "86000", "86045", "86060", "86074", "86075", "86088", "86097", "86098", "86115", "86134", "86144", "86156", "86192", "86196", "86197", "86235", "86237", "86239", "86245", "86261", "86277", "86284", "86291", "86340", "86343", "86364", "86386", "86389", "86397", "86431", "86454", "86457", "86475", "86489", "86510", "86517", "86534", "86549", "86552", "86557", "86575", "86594", "86607", "86614", "86622", "86653", "86668", "86693", "86696", "86697", "86699", "86704", "86708", "86719", "86749", "86787", "86788", "86796", "86798", "86801", "86820", "86830", "86853", "86867", "86871", "86888", "86893", "86894", "86896", "86909", "86913", "86933", "86938", "86944", "86955", "86964", "86971", "86992", "86997", "87018", "87021", "87023", "87039", "87042", "87057", "87066", "87098", "87113", "87149", "87172", "87175", "87219", "87237", "87272", "87280", "87292", "87317", "87319", "87334", "87354", "87377", "87395", "87397", "87411", "87441", "87529", "87534", "87537", "87538", "87564", "87570", "87595", "87609", "87644", "87646", "87658", "87671", "87682", "87683", "87687", "87702", "87713", "87721", "87743", "87745", "87746", "87748", "87750", "87752", "87783", "87820", "87838", "87841", "87845", "87854", "87880", "87896", "87912", "87934", "87936", "87940", "87941", "87991", "87995", "87999", "88001", "88003", "88014", "88018", "88041", "88048", "88054", "88059", "88075", "88087", 
"88095", "88109", "88119", "88124", "88137", "88140", "88163", "88164", "88167", "88209", "88221", "88222", "88223", "88239", "88252", "88274", "88280", "88283", "88336", "88337", "88339", "88342", "88354", "88370", "88371", "88375", "88389", "88430", "88434", "88466", "88482", "88522", "88525", "88529", "88556", "88567", "88578", "88579", "88582", "88589", "88593", "88595", "88605", "88617", "88637", "88674", "88721", "88722", "88754", "88767", "88773", "88784", "88785", "88811", "88828", "88862", "88877", "88883", "88903", "88909", "88924", "88930", "88948", "88971", "88988", "88994", "89003", "89089", "89094", "89101", "89120", "89145", "89188", "89193", "89230", "89231", "89236", "89237", "89252", "89275", "89303", "89305", "89306", "89309", "89310", "89343", "89351", "89352", "89387", "89391", "89398", "89431", "89452", "89454", "89501", "89515", "89532", "89534", "89550", "89583", "89584", "89614", "89634", "89642", "89651", "89672", "89681", "89726", "89737", "89740", "89766", "89785", "89793", "89818", "89829", "89840", "89843", "89858", "89865", "89920", "89938", "89959", "89971", "89981", "90001", "90008", "90026", "90049", "90056", "90059", "90069", "90116", "90125", "90128", "90141", "90150", "90162", "90163", "90164", "90169", "90191", "90193", "90194", "90216", "90236", "90238", "90262", "90268", "90275", "90278", "90296", "90297", "90302", "90307", "90309", "90311", "90314", "90319", "90320", "90323", "90325", "90336", "90344", "90369", "90381", "90383", "90399", "90408", "90411", "90414", "90418", "90419", "90428", "90444", "90451", "90468", "90485", "90492", "90499", "90519", "90522", "90525", "90526", "90534", "90540", "90573", "90584", "90586", "90587", "90589", "90618", "90631", "90634", "90635", "90681", "90725", "90728", "90732", "90744", "90757", "90765", "90766", "90817", "90852", "90854", "90875", "90894", "90899", "90913", "90918", "90919", "90965", "90982", "90986", "91075", "91083", "91087", "91104", "91135", "91143", "91147", "91160", 
"91176", "91178", "91201", "91204", "91206", "91209", "91216", "91218", "91219", "91229", "91233", "91254", "91278", "91286", "91289", "91294", "91320", "91364", "91379", "91387", "91398", "91403", "91409", "91421", "91423", "91439", "91453", "91495", "91521", "91523", "91525", "91529", "91545", "91564", "91590", "91620", "91632", "91639", "91649", "91654", "91669", "91675", "91685", "91686", "91692", "91711", "91715", "91721", "91732", "91749", "91757", "91764", "91767", "91782", "91818", "91821", "91824", "91830", "91838", "91844", "91851", "91852", "91854", "91869", "91877", "91904", "91933", "91934", "91944", "91952", "92005", "92022", "92024", "92033", "92046", "92105", "92106", "92123", "92124", "92139", "92150", "92160", "92185", "92205", "92219", "92244", "92255", "92258", "92276", "92284", "92291", "92293", "92297", "92324", "92366", "92444", "92455", "92459", "92477", "92503", "92532", "92562", "92565", "92621", "92629", "92648", "92668", "92686", "92704", "92724", "92738", "92807", "92808", "92812", "92814", "92819", "92834", "92884", "92898", "92917", "92924", "92935", "93003", "93027", "93075", "93109", "93175", "93235", "93289", "93292", "93298", "93301", "93318", "93342", "93360", "93368", "93372", "93394", "93405", "93425", "93445", "93447", "93451", "93481", "93511", "93521", "93524", "93592", "93595", "93604", "93617", "93622", "93628", "93629", "93683", "93788", "93790", "93804", "93824", "93830", "93868", "93879", "93883", "93951", "93957", "93959", "93971", "94004", "94027", "94029", "94039", "94062", "94074", "94088", "94090", "94092", "94098", "94104", "94106", "94107", "94120", "94136", "94145", "94153", "94167", "94174", "94188", "94190", "94212", "94240", "94244", "94253", "94254", "94267", "94273", "94286", "94289", "94312", "94324", "94338", "94355", "94369", "94372", "94408", "94422", "94435", "94462", "94464", "94466", "94489", "94525", "94536", "94546", "94554", "94569", "94593", "94597", "94602", "94614", "94615", "94642", "94683", 
"94686", "94689", "94694", "94744", "94748", "94749", "94752", "94760", "94769", "94843", "94854", "94919", "94921", "94975", "94980", "94993", "95010", "95018", "95038", "95079", "95114", "95115", "95119", "95124", "95167", "95236", "95241", "95258", "95271", "95272", "95281", "95290", "95294", "95325", "95354", "95386", "95391", "95392", "95402", "95413", "95420", "95423", "95460", "95464", "95465", "95491", "95495", "95497", "95501", "95508", "95521", "95566", "95576", "95591", "95597", "95605", "95621", "95623", "95625", "95669", "95681", "95709", "95717", "95743", "95752", "95781", "95799", "95802", "95804", "95817", "95819", "95858", "95867", "95888", "95903", "95933", "95945", "95965", "95975", "95980", "95985", "95990", "96000", "96045", "96060", "96074", "96075", "96088", "96098", "96115", "96134", "96144", "96192", "96196", "96197", "96237", "96239", "96240", "96245", "96277", "96284", "96291", "96294", "96340", "96343", "96364", "96386", "96389", "96431", "96444", "96457", "96489", "96497", "96517", "96534", "96539", "96549", "96557", "96575", "96594", "96607", "96614", "96622", "96653", "96668", "96693", "96696", "96697", "96699", "96719", "96723", "96749", "96787", "96788", "96798", "96801", "96820", "96830", "96853", "96867", "96871", "96893", "96894", "96896", "96909", "96933", "96938", "96955", "96964", "96971", "96984", "96992", "96997", "97021", "97023", "97039", "97042", "97057", "97066", "97098", "97113", "97137", "97172", "97207", "97219", "97253", "97272", "97292", "97317", "97319", "97322", "97333", "97334", "97354", "97395", "97411", "97441", ) PACKAGE_STATUSES = ( 'Manifest', 'In-Transit', 'Exception', 'Out for Delivery', 'Ready for Pickup', 'Delivered', 'Void', ) with open('spring/dictionary.txt') as fh: LOREM = fh.read() with open('spring/garbage.txt') as fh: GARBAGE = fh.read()
pavel-paulau/perfrunner
spring/dictionary.py
Python
apache-2.0
138,828
[ "COLUMBUS", "Elk" ]
5ae2aef1160c1c433ba63bc53290a2d0780f6989d86087ae075ff5d4f6ea6e2d
# ============================================================================ # # Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved. # www.conceptive.be / project-camelot@conceptive.be # # This file is part of the Camelot Library. # # This file may be used under the terms of the GNU General Public # License version 2.0 as published by the Free Software Foundation # and appearing in the file license.txt included in the packaging of # this file. Please review this information to ensure GNU # General Public Licensing requirements will be met. # # If you are unsure which license is appropriate for your use, please # visit www.python-camelot.com or contact project-camelot@conceptive.be # # This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE # WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. # # For use of this library in commercial applications, please contact # project-camelot@conceptive.be # # ============================================================================ from PyQt4 import QtGui, QtCore from PyQt4.QtCore import Qt class BusyWidget(QtGui.QWidget): """A widget indicating the application is performing some background task. 
The widget acts as an overlay of its parent widget and displays animating orbs""" def __init__(self, parent = None): QtGui.QWidget.__init__(self, parent) palette = QtGui.QPalette(self.palette()) palette.setColor(palette.Background, Qt.transparent) self.setPalette(palette) self.setAttribute(Qt.WA_TransparentForMouseEvents) self.orbs = 5 self.highlighted_orb = self.orbs self.timer = None @QtCore.pyqtSlot(bool) def set_busy(self, busy_state): """start/stop the animation :arg busy_state: True or False """ import sip if sip.isdeleted(self): """underlying object is deleted, no use trying anything""" return if busy_state: self.timer = self.startTimer(200) self.counter = 0 self.show() else: if self.timer: self.killTimer(self.timer) self.timer = None self.hide() def paintEvent(self, event): """custom paint, painting the orbs""" painter = QtGui.QPainter() painter.begin(self) painter.setRenderHint(QtGui.QPainter.Antialiasing) painter.setPen(QtGui.QPen(Qt.NoPen)) width = self.width() height = self.height() radius = min(width/(3*self.orbs+1), height/4) for i in range(self.orbs): if i!=self.highlighted_orb: painter.setBrush(QtGui.QBrush(QtGui.QColor(180, 180, 180))) else: painter.setBrush(QtGui.QBrush(QtGui.QColor(127, 127, 127))) center_x = width - (3*i+2)*radius center_y = height / 2 painter.drawEllipse(center_x - radius, center_y - radius, 2*radius, 2*radius) painter.end() def timerEvent(self, event): """custom timer event, updating the animation""" self.update() self.counter += 1 self.highlighted_orb -= 1 if self.highlighted_orb < 0: self.highlighted_orb = self.orbs
kurtraschke/camelot
camelot/view/controls/busy_widget.py
Python
gpl-2.0
3,415
[ "VisIt" ]
b90a57dd298d2691f458ea3f6281c351a4809252017278171b9c55ad6665b6e9
# The MIT License (MIT) # # Copyright (c) 2015 Brian Wray (brian@wrocket.org) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
import subprocess import json import unittest def call_tulip(args): cmd = ['../../src/tulip'] cmd.extend(args) out = subprocess.check_output(cmd) return out.decode('utf-8') class SearchResult: def __init__(self, move, score, move_scores=[]): self.move = move self.score = score self.move_scores = move_scores class TestSearch(unittest.TestCase): def setUp(self): None def get_result(self, fen): result = call_tulip(['-simplesearch', fen]) parsed_output = json.loads(result) json_obj = parsed_output['searchResult'] parsed_scores = [(x['move'], x['score']) for x in json_obj['rootNodeScores']] sorted_scores = sorted(parsed_scores, key=lambda x: -1* x[1]) # sort move scores by score return SearchResult(move=json_obj['move'], score=json_obj['score'], move_scores=sorted_scores) def test_easy_skewer(self): result = self.get_result('4q3/8/8/8/4k3/8/8/2KR4 w - - 0 1') self.assertEqual('Re1+', result.move) def test_friend_liver_qs(self): result = self.get_result('rnbqkb1r/1pp1ppp1/p4n1p/1N1p4/3P1B2/8/PPP1PPPP/R2QKBNR w KQkq - 0 5') self.assertEqual('Nxc7+', result.move) def test_simple_fork(self): result = self.get_result('7k/8/2n4p/8/2K3Q1/8/8/8 b - - 0 1') self.assertEqual('Ne5+', result.move) def test_play_for_draw(self): result = self.get_result('7k/8/2n5/8/2K3Q1/8/8/8 b - - 0 1') self.assertEqual('Ne5+', result.move) def test_short_mate(self): result = self.get_result('1rb2rk1/p1q1ppbp/n2p3B/3n1P2/1ppP4/3B1N2/PPPQN1PP/K2R3R w - - 0 1') self.assertEqual('Qg5', result.move) if __name__ == '__main__': unittest.main()
wrocket/Tulip-Chess
tests/search_tests/test_simple_attacks.py
Python
mit
2,888
[ "Brian" ]
c3230108f28e6754b2765bf9482fcda1559eddc262633aa973732b79f34b7e54
__author__ = "Mihaela Rosca" __contact__ = "mihaela.c.rosca@gmail.com" import numpy as np import restrictedBoltzmannMachine as rbm # TODO: use conjugate gradient for backpropagation instead of steepest descent # see here for a theano example http://deeplearning.net/tutorial/code/logistic_cg.py # TODO: add weight decay in back prop but especially with the constraint # on the weights # TODO: monitor the changes in error and change the learning rate according # to that # TODO: wake sleep for improving generation # TODO: nesterov method for momentum """In all the above topLayer does not mean the top most layer, but rather the layer above the current one.""" from common import * """ Class that implements a deep belief network, for classification """ class DBN(object): """ Arguments: nrLayers: the number of layers of the network. In case of discriminative traning, also contains the classifcation layer (the last softmax layer) type: integer layerSizes: the sizes of the individual layers. type: list of integers of size nrLayers activationFunctions: the functions that are used to transform the input of a neuron into its output. The functions should be vectorized (as per numpy) to be able to apply them for an entire layer. 
type: list of objects of type ActivationFunction """ def __init__(self, nrLayers, layerSizes, activationFunctions, dropout=0.5, rbmDropout=0.5, visibleDropout=0.8, rbmVisibleDropout=1): self.nrLayers = nrLayers self.layerSizes = layerSizes # Note that for the first one the activatiom function does not matter # So for that one there is no need to pass in an activation function self.activationFunctions = activationFunctions self.initialized = False self.dropout = dropout self.rbmDropout = rbmDropout self.visibleDropout = visibleDropout self.rbmVisibleDropout = rbmVisibleDropout assert len(layerSizes) == nrLayers assert len(activationFunctions) == nrLayers - 1 """ TODO: If labels = None, only does the generative training with fine tuning for generation, not for discrimintaiton TODO: what happens if you do both? do the fine tuning for generation and then do backprop for discrimintaiton """ def train(self, data, labels=None): # This depends if you have generative or not nrRbms = self.nrLayers - 2 self.weights = [] self.biases = [] currentData = data for i in xrange(nrRbms): net = rbm.RBM(self.layerSizes[i], self.layerSizes[i+1], rbm.contrastiveDivergence, self.rbmDropout, self.rbmVisibleDropout, self.activationFunctions[i].value) net.train(currentData) self.weights += [net.weights / self.dropout] self.biases += [net.biases[1]] currentData = net.hiddenRepresentation(currentData) # This depends if you have generative or not # Initialize the last layer of weights to zero if you have # a discriminative net self.weights += [np.zeros((self.layerSizes[-2], self.layerSizes[-1]))] self.biases += [np.zeros(self.layerSizes[-1])] assert len(self.weights) == self.nrLayers - 1 assert len(self.biases) == self.nrLayers - 1 # Does backprop or wake sleep? self.fineTune(data, labels) self.classifcationWeights = map(lambda x: x * self.dropout, self.weights) self.classifcationBiases = self.biases """Fine tunes the weigths and biases using backpropagation. 
Arguments: data: The data used for traning and fine tuning labels: A numpy nd array. Each label should be transformed into a binary base vector before passed into this function. miniBatch: The number of instances to be used in a miniBatch epochs: The number of epochs to use for fine tuning """ def fineTune(self, data, labels, miniBatchSize=10, epochs=100): learningRate = 0.1 batchLearningRate = learningRate / miniBatchSize nrMiniBatches = len(data) / miniBatchSize oldDWeights = zerosFromShape(self.weights) oldDBias = zerosFromShape(self.biases) stages = len(self.weights) # TODO: maybe find a better way than this to find a stopping criteria for epoch in xrange(epochs): if epoch < epochs / 10: momentum = 0.5 else: momentum = 0.95 for batch in xrange(nrMiniBatches): start = batch * miniBatchSize end = (batch + 1) * miniBatchSize batchData = data[start: end] # this is a list of layer activities layerValues = forwardPassDropout(self.weights, self.biases, self.activationFunctions, batchData, self.dropout, self.visibleDropout) finalLayerErrors = derivativesCrossEntropyError(labels[start:end], layerValues[-1]) # Compute all derivatives dWeights, dBias = backprop(self.weights, layerValues, finalLayerErrors, self.activationFunctions) # Update the weights and biases using gradient descent # Also update the old weights for index in xrange(stages): oldDWeights[index] = momentum * oldDWeights[index] - batchLearningRate * dWeights[index] oldDBias[index] = momentum * oldDBias[index] - batchLearningRate * dBias[index] self.weights[index] += oldDWeights[index] self.biases[index] += oldDBias[index] def classify(self, dataInstaces): lastLayerValues = forwardPass(self.classifcationWeights, self.classifcationBiases, self.activationFunctions, dataInstaces)[-1] return lastLayerValues, np.argmax(lastLayerValues, axis=1) """ Arguments: weights: list of numpy nd-arrays layerValues: list of numpy arrays, each array representing the values of the neurons obtained during a forward pass of the 
network finalLayerErrors: errors on the final layer, they depend on the error function chosen. For softmax activation function on the last layer, use cross entropy as an error function. """ def backprop(weights, layerValues, finalLayerErrors, activationFunctions): nrLayers = len(weights) + 1 deDw = [] deDbias = [] upperLayerErrors = finalLayerErrors for layer in xrange(nrLayers - 1, 0, -1): deDz = activationFunctions[layer - 1].derivativeForLinearSum( upperLayerErrors, layerValues[layer]) # upperLayerErrors = np.dot(deDz, weights[layer - 1].T) upperLayerErrors = np.tensordot(deDz, weights[layer - 1].T, [[deDz.ndim - 1], [weights[layer - 1].T.ndim -2]]) dw = np.einsum('ij,ik->jk', layerValues[layer - 1], deDz) dbias = deDz.sum(axis=0) # Iterating in decreasing order of layers, so we are required to # append the weight derivatives at the front as we go along deDw.insert(0, dw) deDbias.insert(0, dbias) return deDw, deDbias """ Does not do dropout. Used for classification. """ def forwardPass(weights, biases, activationFunctions, dataInstaces): currentLayerValues = dataInstaces layerValues = [currentLayerValues] size = dataInstaces.shape[0] for stage in xrange(len(weights)): w = weights[stage] b = biases[stage] activation = activationFunctions[stage] linearSum = np.dot(currentLayerValues, w) + np.tile(b, (size, 1)) currentLayerValues = activation.value(linearSum) layerValues += [currentLayerValues] return layerValues """Does a forward pass trought the network and computes the values of the neurons in all the layers. Required for backpropagation and classification. Arguments: dataInstaces: The instances to be run trough the network. 
""" def forwardPassDropout(weights, biases, activationFunctions, dataInstaces, dropout, visibleDropout): # dropout on the visible units # generally this is around 80% visibleOn = sample(visibleDropout, dataInstaces.shape) thinnedValues = dataInstaces * visibleOn layerValues = [thinnedValues] size = dataInstaces.shape[0] for stage in xrange(len(weights)): w = weights[stage] b = biases[stage] activation = activationFunctions[stage] linearSum = np.dot(thinnedValues, w) + np.tile(b, (size, 1)) currentLayerValues = activation.value(linearSum) # this is the way to do it, because of how backprop works the wij # will cancel out if the unit on the layer is non active # de/ dw_i_j = de / d_z_j * d_z_j / d_w_i_j = de / d_z_j * y_i # so if we set a unit as non active here (and we have to because # of this exact same reason and of ow we backpropagate) if stage != len(weights) - 1: on = sample(dropout, currentLayerValues.shape) thinnedValues = on * currentLayerValues layerValues += [thinnedValues] else: layerValues += [currentLayerValues] return layerValues """ Computes the derivatives of the top most layer given their output and the target labels. This is computed using the cross entropy function. See: http://en.wikipedia.org/wiki/Cross_entropy for the discrete case. Since it is used with a softmax unit for classification, the output of the unit represent a discrete probablity distribution and the expected values are composed of a base vector, with 1 for the correct class and 0 for all the rest. """ def derivativesCrossEntropyError(expected, actual): return - expected * (1.0 / actual) # Only works with binary units def wakeSleep(): pass # need to alternate between wake and sleep pahses
mihaelacr/pydeeplearn
code/old-version/deepbelief.py
Python
bsd-3-clause
9,738
[ "NEURON" ]
c7c23e9c62d3061dd1cc1023b96a7affeb6bb15413846af377387112c807b755
''' Abyssium - Aluminum - 2.71 Ash - 0.71 Atherite - Beechwood - 0.82 Brass - 8.47 Bronze - 8.20 Ceramic - Chainmail - Copper - 8.94 Cotton - Diamond - 0.03 Divinum - Glass - Gold - 19.32 Infernite - Iron - 7.85 Iurwood - 1.10 Leather - 0.86 Magicite - Mahogany - 0.66 Maple - 0.62 Negacite - Oak - 0.74 Platinum - 21.40 Sacrium - Salericite - Silk - Silver - 10.49 Steel - 7.85 StuddedLeather - Tungsten - 19.60 ''' from .Abyssium import Abyssium from .Aluminum import Aluminum from .Ash import Ash from .Atherite import Atherite from .Brass import Brass from .Bronze import Bronze from .Ceramic import Ceramic from .Chainmail import Chainmail from .Copper import Copper from .Cotton import Cotton from .Crystal import Crystal from .Divinum import Divinum from .Glass import Glass from .Gold import Gold from .Infernite import Infernite from .Iron import Iron from .Iurwood import Iurwood from .Leather import Leather from .Magicite import Magicite from .Mahogany import Mahogany from .Maple import Maple from .Negacite import Negacite from .Oak import Oak from .Platinum import Platinum from .Sacrium import Sacrium from .Salericite import Salericite from .Silk import Silk from .Silver import Silver from .Steel import Steel from .StuddedLeather import StuddedLeather from .Tungsten import Tungsten __all__ = [ "Abyssium", "Aluminum", "Ash", "Atherite", "Brass", "Bronze", "Ceramic", "Chainmail", "Copper", "Cotton", "Crystal", "Divinum", "Glass", "Gold", "Infernite", "Iron", "Iurwood", "Leather", "Magicite", "Mahogany", "Maple", "Negacite", "Oak", "Platinum", "Sacrium", "Salericite", "Silk", "Silver", "Steel", "StuddedLeather", "Tungsten" ]
etkirsch/legends-of-erukar
erukar/content/modifiers/material/random/__init__.py
Python
agpl-3.0
2,083
[ "CRYSTAL" ]
0e90dc992b43a04ac5e34b3514ff2fa9e54e82522bf8f536f070bf0b76e11a82
import unittest from test import support from itertools import * from weakref import proxy from decimal import Decimal from fractions import Fraction import sys import operator import random import copy import pickle from functools import reduce maxsize = support.MAX_Py_ssize_t minsize = -maxsize-1 def lzip(*args): return list(zip(*args)) def onearg(x): 'Test function of one argument' return 2*x def errfunc(*args): 'Test function that raises an error' raise ValueError def gen3(): 'Non-restartable source sequence' for i in (0, 1, 2): yield i def isEven(x): 'Test predicate' return x%2==0 def isOdd(x): 'Test predicate' return x%2==1 def tupleize(*args): return args def irange(n): for i in range(n): yield i class StopNow: 'Class emulating an empty iterable.' def __iter__(self): return self def __next__(self): raise StopIteration def take(n, seq): 'Convenience function for partially consuming a long of infinite iterable' return list(islice(seq, n)) def prod(iterable): return reduce(operator.mul, iterable, 1) def fact(n): 'Factorial' return prod(range(1, n+1)) # root level methods for pickling ability def testR(r): return r[0] def testR2(r): return r[2] def underten(x): return x<10 class TestBasicOps(unittest.TestCase): def pickletest(self, it, stop=4, take=1, compare=None): """Test that an iterator is the same after pickling, also when part-consumed""" def expand(it, i=0): # Recursively expand iterables, within sensible bounds if i > 10: raise RuntimeError("infinite recursion encountered") if isinstance(it, str): return it try: l = list(islice(it, stop)) except TypeError: return it # can't expand it return [expand(e, i+1) for e in l] # Test the initial copy against the original dump = pickle.dumps(it) i2 = pickle.loads(dump) self.assertEqual(type(it), type(i2)) a, b = expand(it), expand(i2) self.assertEqual(a, b) if compare: c = expand(compare) self.assertEqual(a, c) # Take from the copy, and create another copy and compare them. 
i3 = pickle.loads(dump) took = 0 try: for i in range(take): next(i3) took += 1 except StopIteration: pass #in case there is less data than 'take' dump = pickle.dumps(i3) i4 = pickle.loads(dump) a, b = expand(i3), expand(i4) self.assertEqual(a, b) if compare: c = expand(compare[took:]) self.assertEqual(a, c); def test_accumulate(self): self.assertEqual(list(accumulate(range(10))), # one positional arg [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) self.assertEqual(list(accumulate(iterable=range(10))), # kw arg [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) for typ in int, complex, Decimal, Fraction: # multiple types self.assertEqual( list(accumulate(map(typ, range(10)))), list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]))) self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric self.assertEqual(list(accumulate([])), []) # empty iterable self.assertEqual(list(accumulate([7])), [7]) # iterable of length one self.assertRaises(TypeError, accumulate, range(10), 5, 6) # too many args self.assertRaises(TypeError, accumulate) # too few args self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6] self.assertEqual(list(accumulate(s, min)), [2, 2, 2, 2, 2, 0, 0, 0, 0, 0]) self.assertEqual(list(accumulate(s, max)), [2, 8, 9, 9, 9, 9, 9, 9, 9, 9]) self.assertEqual(list(accumulate(s, operator.mul)), [2, 16, 144, 720, 5040, 0, 0, 0, 0, 0]) with self.assertRaises(TypeError): list(accumulate(s, chr)) # unary-operation self.pickletest(accumulate(range(10))) # test pickling def test_chain(self): def chain2(*iterables): 'Pure python version in the docs' for it in iterables: for element in it: yield element for c in (chain, chain2): self.assertEqual(list(c('abc', 'def')), list('abcdef')) self.assertEqual(list(c('abc')), list('abc')) self.assertEqual(list(c('')), []) self.assertEqual(take(4, c('abc', 'def')), list('abcd')) 
self.assertRaises(TypeError, list,c(2, 3)) def test_chain_from_iterable(self): self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef')) self.assertEqual(list(chain.from_iterable(['abc'])), list('abc')) self.assertEqual(list(chain.from_iterable([''])), []) self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd')) self.assertRaises(TypeError, list, chain.from_iterable([2, 3])) def test_chain_reducible(self): operators = [copy.deepcopy, lambda s: pickle.loads(pickle.dumps(s))] for oper in operators: it = chain('abc', 'def') self.assertEqual(list(oper(it)), list('abcdef')) self.assertEqual(next(it), 'a') self.assertEqual(list(oper(it)), list('bcdef')) self.assertEqual(list(oper(chain(''))), []) self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd')) self.assertRaises(TypeError, list, oper(chain(2, 3))) self.pickletest(chain('abc', 'def'), compare=list('abcdef')) def test_combinations(self): self.assertRaises(TypeError, combinations, 'abc') # missing r argument self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, combinations, None) # pool is not iterable self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))): self.assertEqual(list(op(combinations('abc', 32))), []) # r > n self.assertEqual(list(op(combinations('ABCD', 2))), [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) testIntermediate = combinations('ABCD', 2) next(testIntermediate) self.assertEqual(list(op(testIntermediate)), [('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) self.assertEqual(list(op(combinations(range(4), 3))), [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) testIntermediate = combinations(range(4), 3) next(testIntermediate) self.assertEqual(list(op(testIntermediate)), [(0,1,3), (0,2,3), (1,2,3)]) def combinations1(iterable, r): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) 
if r > n: return indices = list(range(r)) yield tuple(pool[i] for i in indices) while 1: for i in reversed(range(r)): if indices[i] != i + n - r: break else: return indices[i] += 1 for j in range(i+1, r): indices[j] = indices[j-1] + 1 yield tuple(pool[i] for i in indices) def combinations2(iterable, r): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) for indices in permutations(range(n), r): if sorted(indices) == list(indices): yield tuple(pool[i] for i in indices) def combinations3(iterable, r): 'Pure python version from cwr()' pool = tuple(iterable) n = len(pool) for indices in combinations_with_replacement(range(n), r): if len(set(indices)) == r: yield tuple(pool[i] for i in indices) for n in range(7): values = [5*x-12 for x in range(n)] for r in range(n+2): result = list(combinations(values, r)) self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs self.assertEqual(len(result), len(set(result))) # no repeats self.assertEqual(result, sorted(result)) # lexicographic order for c in result: self.assertEqual(len(c), r) # r-length combinations self.assertEqual(len(set(c)), r) # no duplicate elements self.assertEqual(list(c), sorted(c)) # keep original ordering self.assertTrue(all(e in values for e in c)) # elements taken from input iterable self.assertEqual(list(c), [e for e in values if e in c]) # comb is a subsequence of the input iterable self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version self.pickletest(combinations(values, r)) # test pickling # Test implementation detail: tuple re-use @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) 
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) def test_combinations_with_replacement(self): cwr = combinations_with_replacement self.assertRaises(TypeError, cwr, 'abc') # missing r argument self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, cwr, None) # pool is not iterable self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))): self.assertEqual(list(op(cwr('ABC', 2))), [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) testIntermediate = cwr('ABC', 2) next(testIntermediate) self.assertEqual(list(op(testIntermediate)), [('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) def cwr1(iterable, r): 'Pure python version shown in the docs' # number items returned: (n+r-1)! / r! / (n-1)! when n>0 pool = tuple(iterable) n = len(pool) if not n and r: return indices = [0] * r yield tuple(pool[i] for i in indices) while 1: for i in reversed(range(r)): if indices[i] != n - 1: break else: return indices[i:] = [indices[i] + 1] * (r - i) yield tuple(pool[i] for i in indices) def cwr2(iterable, r): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) for indices in product(range(n), repeat=r): if sorted(indices) == list(indices): yield tuple(pool[i] for i in indices) def numcombs(n, r): if not n: return 0 if r else 1 return fact(n+r-1) / fact(r)/ fact(n-1) for n in range(7): values = [5*x-12 for x in range(n)] for r in range(n+2): result = list(cwr(values, r)) self.assertEqual(len(result), numcombs(n, r)) # right number of combs self.assertEqual(len(result), len(set(result))) # no repeats self.assertEqual(result, sorted(result)) # lexicographic order regular_combs = list(combinations(values, r)) # compare to combs without replacement if n == 0 or r <= 1: self.assertEqual(result, regular_combs) # cases that should be identical else: self.assertTrue(set(result) >= set(regular_combs)) # rest should 
be supersets of regular combs for c in result: self.assertEqual(len(c), r) # r-length combinations noruns = [k for k,v in groupby(c)] # combo without consecutive repeats self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive self.assertEqual(list(c), sorted(c)) # keep original ordering self.assertTrue(all(e in values for e in c)) # elements taken from input iterable self.assertEqual(noruns, [e for e in values if e in c]) # comb is a subsequence of the input iterable self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version self.pickletest(cwr(values,r)) # test pickling # Test implementation detail: tuple re-use @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) def test_permutations(self): self.assertRaises(TypeError, permutations) # too few arguments self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, permutations, None) # pool is not iterable self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative self.assertEqual(list(permutations('abc', 32)), []) # r > n self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None self.assertEqual(list(permutations(range(3), 2)), [(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)]) def permutations1(iterable, r=None): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) r = n if r is None else r if r > n: return indices = list(range(n)) cycles = list(range(n-r+1, n+1))[::-1] yield tuple(pool[i] for i in indices[:r]) while n: for i in reversed(range(r)): cycles[i] -= 1 if cycles[i] == 0: indices[i:] = indices[i+1:] + indices[i:i+1] cycles[i] = n - i else: j = cycles[i] 
indices[i], indices[-j] = indices[-j], indices[i] yield tuple(pool[i] for i in indices[:r]) break else: return def permutations2(iterable, r=None): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) r = n if r is None else r for indices in product(range(n), repeat=r): if len(set(indices)) == r: yield tuple(pool[i] for i in indices) for n in range(7): values = [5*x-12 for x in range(n)] for r in range(n+2): result = list(permutations(values, r)) self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms self.assertEqual(len(result), len(set(result))) # no repeats self.assertEqual(result, sorted(result)) # lexicographic order for p in result: self.assertEqual(len(p), r) # r-length permutations self.assertEqual(len(set(p)), r) # no duplicate elements self.assertTrue(all(e in values for e in p)) # elements taken from input iterable self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version if r == n: self.assertEqual(result, list(permutations(values, None))) # test r as None self.assertEqual(result, list(permutations(values))) # test default r self.pickletest(permutations(values, r)) # test pickling @support.impl_detail("tuple resuse is CPython specific") def test_permutations_tuple_reuse(self): self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) def test_combinatorics(self): # Test relationships between product(), permutations(), # combinations() and combinations_with_replacement(). 
for n in range(6): s = 'ABCDEFG'[:n] for r in range(8): prod = list(product(s, repeat=r)) cwr = list(combinations_with_replacement(s, r)) perm = list(permutations(s, r)) comb = list(combinations(s, r)) # Check size self.assertEqual(len(prod), n**r) self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r)) self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r)) self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # Check lexicographic order without repeated tuples self.assertEqual(prod, sorted(set(prod))) self.assertEqual(cwr, sorted(set(cwr))) self.assertEqual(perm, sorted(set(perm))) self.assertEqual(comb, sorted(set(comb))) # Check interrelationships self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm def test_compress(self): self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list('')) self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF')) self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC')) self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC')) n = 10000 data = chain.from_iterable(repeat(range(6), n)) selectors = chain.from_iterable(repeat((0, 1))) self.assertEqual(list(compress(data, selectors)), [1,3,5] * n) self.assertRaises(TypeError, 
compress, None, range(6)) # 1st arg not iterable self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable self.assertRaises(TypeError, compress, range(6)) # too few args self.assertRaises(TypeError, compress, range(6), None) # too many args # check copy, deepcopy, pickle for op in (lambda a:copy.copy(a), lambda a:copy.deepcopy(a), lambda a:pickle.loads(pickle.dumps(a))): for data, selectors, result1, result2 in [ ('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'), ('ABCDEF', [0,0,0,0,0,0], '', ''), ('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'), ('ABCDEF', [1,0,1], 'AC', 'C'), ('ABC', [0,1,1,1,1,1], 'BC', 'C'), ]: self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1)) self.assertEqual(list(op(compress(data, selectors))), list(result1)) testIntermediate = compress(data, selectors) if result1: next(testIntermediate) self.assertEqual(list(op(testIntermediate)), list(result2)) def test_count(self): self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)]) self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)]) self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)]) self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)]) self.assertRaises(TypeError, count, 2, 3, 4) self.assertRaises(TypeError, count, 'a') self.assertEqual(list(islice(count(maxsize-5), 10)), list(range(maxsize-5, maxsize+5))) self.assertEqual(list(islice(count(-maxsize-5), 10)), list(range(-maxsize-5, -maxsize+5))) self.assertEqual(list(islice(count(10, maxsize+5), 3)), list(range(10, 10+3*(maxsize+5), maxsize+5))) c = count(3) self.assertEqual(repr(c), 'count(3)') next(c) self.assertEqual(repr(c), 'count(4)') c = count(-9) self.assertEqual(repr(c), 'count(-9)') next(c) self.assertEqual(repr(count(10.25)), 'count(10.25)') self.assertEqual(next(c), -8) for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5): # 
Test repr r1 = repr(count(i)) r2 = 'count(%r)'.__mod__(i) self.assertEqual(r1, r2) # check copy, deepcopy, pickle for value in -3, 3, maxsize-5, maxsize+5: c = count(value) self.assertEqual(next(copy.copy(c)), value) self.assertEqual(next(copy.deepcopy(c)), value) self.pickletest(count(value)) #check proper internal error handling for large "step' sizes count(1, maxsize+5); sys.exc_info() def test_count_with_stride(self): self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(lzip('abc',count(start=2,step=3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(lzip('abc',count(step=-1)), [('a', 0), ('b', -1), ('c', -2)]) self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)]) self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)]) self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3))) self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3))) self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j]) self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))), [Decimal('1.1'), Decimal('1.2'), Decimal('1.3')]) self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))), [Fraction(2,3), Fraction(17,21), Fraction(20,21)]) self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0])) c = count(3, 5) self.assertEqual(repr(c), 'count(3, 5)') next(c) self.assertEqual(repr(c), 'count(8, 5)') c = count(-9, 0) self.assertEqual(repr(c), 'count(-9, 0)') next(c) self.assertEqual(repr(c), 'count(-9, 0)') c = count(-9, -3) self.assertEqual(repr(c), 'count(-9, -3)') next(c) self.assertEqual(repr(c), 'count(-12, -3)') self.assertEqual(repr(c), 'count(-12, -3)') self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)') self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int self.assertEqual(repr(count(10.5, 
1.00)), 'count(10.5, 1.0)') # do show float values lilke 1.0 for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5): for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5): # Test repr r1 = repr(count(i, j)) if j == 1: r2 = ('count(%r)' % i) else: r2 = ('count(%r, %r)' % (i, j)) self.assertEqual(r1, r2) self.pickletest(count(i, j)) def test_cycle(self): self.assertEqual(take(10, cycle('abc')), list('abcabcabca')) self.assertEqual(list(cycle('')), []) self.assertRaises(TypeError, cycle) self.assertRaises(TypeError, cycle, 5) self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0]) # check copy, deepcopy, pickle c = cycle('abc') self.assertEqual(next(c), 'a') #simple copy currently not supported, because __reduce__ returns #an internal iterator #self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab')) self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab')) self.assertEqual(take(10, pickle.loads(pickle.dumps(c))), list('bcabcabcab')) next(c) self.assertEqual(take(10, pickle.loads(pickle.dumps(c))), list('cabcabcabc')) self.pickletest(cycle('abc')) def test_groupby(self): # Check whether it accepts arguments correctly self.assertEqual([], list(groupby([]))) self.assertEqual([], list(groupby([], key=id))) self.assertRaises(TypeError, list, groupby('abc', [])) self.assertRaises(TypeError, groupby, None) self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10) # Check normal input s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22), (2,15,22), (3,16,23), (3,17,23)] dup = [] for k, g in groupby(s, lambda r:r[0]): for elem in g: self.assertEqual(k, elem[0]) dup.append(elem) self.assertEqual(s, dup) # Check normal pickled dup = [] for k, g in pickle.loads(pickle.dumps(groupby(s, testR))): for elem in g: self.assertEqual(k, elem[0]) dup.append(elem) self.assertEqual(s, dup) # Check nested case dup = [] for k, g in groupby(s, testR): for ik, ig in groupby(g, testR2): 
for elem in ig: self.assertEqual(k, elem[0]) self.assertEqual(ik, elem[2]) dup.append(elem) self.assertEqual(s, dup) # Check nested and pickled dup = [] for k, g in pickle.loads(pickle.dumps(groupby(s, testR))): for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2))): for elem in ig: self.assertEqual(k, elem[0]) self.assertEqual(ik, elem[2]) dup.append(elem) self.assertEqual(s, dup) # Check case where inner iterator is not used keys = [k for k, g in groupby(s, testR)] expectedkeys = set([r[0] for r in s]) self.assertEqual(set(keys), expectedkeys) self.assertEqual(len(keys), len(expectedkeys)) # Exercise pipes and filters style s = 'abracadabra' # sort s | uniq r = [k for k, g in groupby(sorted(s))] self.assertEqual(r, ['a', 'b', 'c', 'd', 'r']) # sort s | uniq -d r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))] self.assertEqual(r, ['a', 'b', 'r']) # sort s | uniq -c r = [(len(list(g)), k) for k, g in groupby(sorted(s))] self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')]) # sort s | uniq -c | sort -rn | head -3 r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3] self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')]) # iter.__next__ failure class ExpectedError(Exception): pass def delayed_raise(n=0): for i in range(n): yield 'yo' raise ExpectedError def gulp(iterable, keyp=None, func=list): return [func(g) for k, g in groupby(iterable, keyp)] # iter.__next__ failure on outer object self.assertRaises(ExpectedError, gulp, delayed_raise(0)) # iter.__next__ failure on inner object self.assertRaises(ExpectedError, gulp, delayed_raise(1)) # __cmp__ failure class DummyCmp: def __eq__(self, dst): raise ExpectedError s = [DummyCmp(), DummyCmp(), None] # __eq__ failure on outer object self.assertRaises(ExpectedError, gulp, s, func=id) # __eq__ failure on inner object self.assertRaises(ExpectedError, gulp, s) # keyfunc failure def keyfunc(obj): if keyfunc.skip > 0: keyfunc.skip -= 1 return obj else: raise 
ExpectedError # keyfunc failure on outer object keyfunc.skip = 0 self.assertRaises(ExpectedError, gulp, [None], keyfunc) keyfunc.skip = 1 self.assertRaises(ExpectedError, gulp, [None, None], keyfunc) def test_filter(self): self.assertEqual(list(filter(isEven, range(6))), [0,2,4]) self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2]) self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2]) self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6]) self.assertRaises(TypeError, filter) self.assertRaises(TypeError, filter, lambda x:x) self.assertRaises(TypeError, filter, lambda x:x, range(6), 7) self.assertRaises(TypeError, filter, isEven, 3) self.assertRaises(TypeError, next, filter(range(6), range(6))) # check copy, deepcopy, pickle ans = [0,2,4] c = filter(isEven, range(6)) self.assertEqual(list(copy.copy(c)), ans) c = filter(isEven, range(6)) self.assertEqual(list(copy.deepcopy(c)), ans) c = filter(isEven, range(6)) self.assertEqual(list(pickle.loads(pickle.dumps(c))), ans) next(c) self.assertEqual(list(pickle.loads(pickle.dumps(c))), ans[1:]) c = filter(isEven, range(6)) self.pickletest(c) def test_filterfalse(self): self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5]) self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0]) self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0]) self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7]) self.assertRaises(TypeError, filterfalse) self.assertRaises(TypeError, filterfalse, lambda x:x) self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7) self.assertRaises(TypeError, filterfalse, isEven, 3) self.assertRaises(TypeError, next, filterfalse(range(6), range(6))) self.pickletest(filterfalse(isEven, range(6))) def test_zip(self): # XXX This is rather silly now that builtin zip() calls zip()... 
ans = [(x,y) for x, y in zip('abc',count())] self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6))) self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3))) self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3))) self.assertEqual(list(zip('abcdef')), lzip('abcdef')) self.assertEqual(list(zip()), lzip()) self.assertRaises(TypeError, zip, 3) self.assertRaises(TypeError, zip, range(3), 3) self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')], lzip('abc', 'def')) self.assertEqual([pair for pair in zip('abc', 'def')], lzip('abc', 'def')) @support.impl_detail("tuple reuse is specific to CPython") def test_zip_tuple_reuse(self): ids = list(map(id, zip('abc', 'def'))) self.assertEqual(min(ids), max(ids)) ids = list(map(id, list(zip('abc', 'def')))) self.assertEqual(len(dict.fromkeys(ids)), len(ids)) # check copy, deepcopy, pickle ans = [(x,y) for x, y in copy.copy(zip('abc',count()))] self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))] self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count())))] self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) testIntermediate = zip('abc',count()) next(testIntermediate) ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate))] self.assertEqual(ans, [('b', 1), ('c', 2)]) self.pickletest(zip('abc', count())) def test_ziplongest(self): for args in [ ['abc', range(6)], [range(6), 'abc'], [range(1000), range(2000,2100), range(3000,3050)], [range(1000), range(0), range(3000,3050), range(1200), range(1500)], [range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)], ]: target = [tuple([arg[i] if i < len(arg) else None for arg in args]) for i in range(max(map(len, args)))] self.assertEqual(list(zip_longest(*args)), target) self.assertEqual(list(zip_longest(*args, **{})), 
target) target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X' self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target) self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input self.assertEqual(list(zip_longest()), list(zip())) self.assertEqual(list(zip_longest([])), list(zip([]))) self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef'))) self.assertEqual(list(zip_longest('abc', 'defg', **{})), list(zip(list('abc')+[None], 'defg'))) # empty keyword dict self.assertRaises(TypeError, zip_longest, 3) self.assertRaises(TypeError, zip_longest, range(3), 3) for stmt in [ "zip_longest('abc', fv=1)", "zip_longest('abc', fillvalue=1, bogus_keyword=None)", ]: try: eval(stmt, globals(), locals()) except TypeError: pass else: self.fail('Did not raise Type in: ' + stmt) self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')], list(zip('abc', 'def'))) self.assertEqual([pair for pair in zip_longest('abc', 'def')], list(zip('abc', 'def'))) @support.impl_detail("tuple reuse is specific to CPython") def test_zip_longest_tuple_reuse(self): ids = list(map(id, zip_longest('abc', 'def'))) self.assertEqual(min(ids), max(ids)) ids = list(map(id, list(zip_longest('abc', 'def')))) self.assertEqual(len(dict.fromkeys(ids)), len(ids)) def test_zip_longest_pickling(self): self.pickletest(zip_longest("abc", "def")) self.pickletest(zip_longest("abc", "defgh")) self.pickletest(zip_longest("abc", "defgh", fillvalue=1)) self.pickletest(zip_longest("", "defgh")) def test_bug_7244(self): class Repeater: # this class is similar to itertools.repeat def __init__(self, o, t, e): self.o = o self.t = int(t) self.e = e def __iter__(self): # its iterator is itself return self def __next__(self): if self.t > 0: self.t -= 1 return self.o else: raise self.e # Formerly this code in would fail in debug mode # with Undetected Error and Stop Iteration r1 = Repeater(1, 
3, StopIteration) r2 = Repeater(2, 4, StopIteration) def run(r1, r2): result = [] for i, j in zip_longest(r1, r2, fillvalue=0): with support.captured_output('stdout'): print((i, j)) result.append((i, j)) return result self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)]) # Formerly, the RuntimeError would be lost # and StopIteration would stop as expected r1 = Repeater(1, 3, RuntimeError) r2 = Repeater(2, 4, StopIteration) it = zip_longest(r1, r2, fillvalue=0) self.assertEqual(next(it), (1, 2)) self.assertEqual(next(it), (1, 2)) self.assertEqual(next(it), (1, 2)) self.assertRaises(RuntimeError, next, it) def test_product(self): for args, result in [ ([], [()]), # zero iterables (['ab'], [('a',), ('b',)]), # one iterable ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables ([range(0), range(2), range(3)], []), # first iterable with zero length ([range(2), range(0), range(3)], []), # middle iterable with zero length ([range(2), range(3), range(0)], []), # last iterable with zero length ]: self.assertEqual(list(product(*args)), result) for r in range(4): self.assertEqual(list(product(*(args*r))), list(product(*args, **dict(repeat=r)))) self.assertEqual(len(list(product(*[range(7)]*6))), 7**6) self.assertRaises(TypeError, product, range(6), None) def product1(*args, **kwds): pools = list(map(tuple, args)) * kwds.get('repeat', 1) n = len(pools) if n == 0: yield () return if any(len(pool) == 0 for pool in pools): return indices = [0] * n yield tuple(pool[i] for pool, i in zip(pools, indices)) while 1: for i in reversed(range(n)): # right to left if indices[i] == len(pools[i]) - 1: continue indices[i] += 1 for j in range(i+1, n): indices[j] = 0 yield tuple(pool[i] for pool, i in zip(pools, indices)) break else: return def product2(*args, **kwds): 'Pure python version used in docs' pools = list(map(tuple, args)) * kwds.get('repeat', 1) result = [[]] for pool in pools: result = [x+[y] for x in result for y in pool] for prod in result: 
yield tuple(prod) argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3), set('abcdefg'), range(11), tuple(range(13))] for i in range(100): args = [random.choice(argtypes) for j in range(random.randrange(5))] expected_len = prod(map(len, args)) self.assertEqual(len(list(product(*args))), expected_len) self.assertEqual(list(product(*args)), list(product1(*args))) self.assertEqual(list(product(*args)), list(product2(*args))) args = map(iter, args) self.assertEqual(len(list(product(*args))), expected_len) @support.impl_detail("tuple reuse is specific to CPython") def test_product_tuple_reuse(self): self.assertEqual(len(set(map(id, product('abc', 'def')))), 1) self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1) def test_product_pickling(self): # check copy, deepcopy, pickle for args, result in [ ([], [()]), # zero iterables (['ab'], [('a',), ('b',)]), # one iterable ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables ([range(0), range(2), range(3)], []), # first iterable with zero length ([range(2), range(0), range(3)], []), # middle iterable with zero length ([range(2), range(3), range(0)], []), # last iterable with zero length ]: self.assertEqual(list(copy.copy(product(*args))), result) self.assertEqual(list(copy.deepcopy(product(*args))), result) self.pickletest(product(*args)) def test_repeat(self): self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a']) self.assertEqual(lzip(range(3),repeat('a')), [(0, 'a'), (1, 'a'), (2, 'a')]) self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a']) self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a']) self.assertEqual(list(repeat('a', 0)), []) self.assertEqual(list(repeat('a', -3)), []) self.assertRaises(TypeError, repeat) self.assertRaises(TypeError, repeat, None, 3, 4) self.assertRaises(TypeError, repeat, None, 'a') r = repeat(1+0j) self.assertEqual(repr(r), 'repeat((1+0j))') r = repeat(1+0j, 5) self.assertEqual(repr(r), 'repeat((1+0j), 5)') list(r) 
self.assertEqual(repr(r), 'repeat((1+0j), 0)') # check copy, deepcopy, pickle c = repeat(object='a', times=10) self.assertEqual(next(c), 'a') self.assertEqual(take(2, copy.copy(c)), list('a' * 2)) self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2)) self.pickletest(repeat(object='a', times=10)) def test_map(self): self.assertEqual(list(map(operator.pow, range(3), range(1,7))), [0**1, 1**2, 2**3]) self.assertEqual(list(map(tupleize, 'abc', range(5))), [('a',0),('b',1),('c',2)]) self.assertEqual(list(map(tupleize, 'abc', count())), [('a',0),('b',1),('c',2)]) self.assertEqual(take(2,map(tupleize, 'abc', count())), [('a',0),('b',1)]) self.assertEqual(list(map(operator.pow, [])), []) self.assertRaises(TypeError, map) self.assertRaises(TypeError, list, map(None, range(3), range(3))) self.assertRaises(TypeError, map, operator.neg) self.assertRaises(TypeError, next, map(10, range(5))) self.assertRaises(ValueError, next, map(errfunc, [4], [5])) self.assertRaises(TypeError, next, map(onearg, [4], [5])) # check copy, deepcopy, pickle ans = [('a',0),('b',1),('c',2)] c = map(tupleize, 'abc', count()) self.assertEqual(list(copy.copy(c)), ans) c = map(tupleize, 'abc', count()) self.assertEqual(list(copy.deepcopy(c)), ans) c = map(tupleize, 'abc', count()) self.pickletest(c) def test_starmap(self): self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))), [0**1, 1**2, 2**3]) self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))), [0**1, 1**2, 2**3]) self.assertEqual(list(starmap(operator.pow, [])), []) self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5]) self.assertRaises(TypeError, list, starmap(operator.pow, [None])) self.assertRaises(TypeError, starmap) self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra') self.assertRaises(TypeError, next, starmap(10, [(4,5)])) self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)])) self.assertRaises(TypeError, next, starmap(onearg, [(4,5)])) # check copy, deepcopy, 
pickle ans = [0**1, 1**2, 2**3] c = starmap(operator.pow, zip(range(3), range(1,7))) self.assertEqual(list(copy.copy(c)), ans) c = starmap(operator.pow, zip(range(3), range(1,7))) self.assertEqual(list(copy.deepcopy(c)), ans) c = starmap(operator.pow, zip(range(3), range(1,7))) self.pickletest(c) def test_islice(self): for args in [ # islice(args) should agree with range(args) (10, 20, 3), (10, 3, 20), (10, 20), (10, 3), (20,) ]: self.assertEqual(list(islice(range(100), *args)), list(range(*args))) for args, tgtargs in [ # Stop when seqn is exhausted ((10, 110, 3), ((10, 100, 3))), ((10, 110), ((10, 100))), ((110,), (100,)) ]: self.assertEqual(list(islice(range(100), *args)), list(range(*tgtargs))) # Test stop=None self.assertEqual(list(islice(range(10), None)), list(range(10))) self.assertEqual(list(islice(range(10), None, None)), list(range(10))) self.assertEqual(list(islice(range(10), None, None, None)), list(range(10))) self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10))) self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2))) # Test number of items consumed SF #1171417 it = iter(range(10)) self.assertEqual(list(islice(it, 3)), list(range(3))) self.assertEqual(list(it), list(range(3, 10))) # Test invalid arguments ra = range(10) self.assertRaises(TypeError, islice, ra) self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4) self.assertRaises(ValueError, islice, ra, -5, 10, 1) self.assertRaises(ValueError, islice, ra, 1, -5, -1) self.assertRaises(ValueError, islice, ra, 1, 10, -1) self.assertRaises(ValueError, islice, ra, 1, 10, 0) self.assertRaises(ValueError, islice, ra, 'a') self.assertRaises(ValueError, islice, ra, 'a', 1) self.assertRaises(ValueError, islice, ra, 1, 'a') self.assertRaises(ValueError, islice, ra, 'a', 1, 1) self.assertRaises(ValueError, islice, ra, 1, 'a', 1) self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1) # Issue #10323: Less islice in a predictable state c = count() 
self.assertEqual(list(islice(c, 1, 3, 50)), [1]) self.assertEqual(next(c), 3) # check copy, deepcopy, pickle for args in [ # islice(args) should agree with range(args) (10, 20, 3), (10, 3, 20), (10, 20), (10, 3), (20,) ]: self.assertEqual(list(copy.copy(islice(range(100), *args))), list(range(*args))) self.assertEqual(list(copy.deepcopy(islice(range(100), *args))), list(range(*args))) self.pickletest(islice(range(100), *args)) def test_takewhile(self): data = [1, 3, 5, 20, 2, 4, 6, 8] self.assertEqual(list(takewhile(underten, data)), [1, 3, 5]) self.assertEqual(list(takewhile(underten, [])), []) self.assertRaises(TypeError, takewhile) self.assertRaises(TypeError, takewhile, operator.pow) self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra') self.assertRaises(TypeError, next, takewhile(10, [(4,5)])) self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)])) t = takewhile(bool, [1, 1, 1, 0, 0, 0]) self.assertEqual(list(t), [1, 1, 1]) self.assertRaises(StopIteration, next, t) # check copy, deepcopy, pickle self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5]) self.assertEqual(list(copy.deepcopy(takewhile(underten, data))), [1, 3, 5]) self.pickletest(takewhile(underten, data)) def test_dropwhile(self): data = [1, 3, 5, 20, 2, 4, 6, 8] self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8]) self.assertEqual(list(dropwhile(underten, [])), []) self.assertRaises(TypeError, dropwhile) self.assertRaises(TypeError, dropwhile, operator.pow) self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra') self.assertRaises(TypeError, next, dropwhile(10, [(4,5)])) self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)])) # check copy, deepcopy, pickle self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8]) self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))), [20, 2, 4, 6, 8]) self.pickletest(dropwhile(underten, data)) def test_tee(self): n = 200 a, b = tee([]) # test empty 
iterator self.assertEqual(list(a), []) self.assertEqual(list(b), []) a, b = tee(irange(n)) # test 100% interleaved self.assertEqual(lzip(a,b), lzip(range(n), range(n))) a, b = tee(irange(n)) # test 0% interleaved self.assertEqual(list(a), list(range(n))) self.assertEqual(list(b), list(range(n))) a, b = tee(irange(n)) # test dealloc of leading iterator for i in range(100): self.assertEqual(next(a), i) del a self.assertEqual(list(b), list(range(n))) a, b = tee(irange(n)) # test dealloc of trailing iterator for i in range(100): self.assertEqual(next(a), i) del b self.assertEqual(list(a), list(range(100, n))) for j in range(5): # test randomly interleaved order = [0]*n + [1]*n random.shuffle(order) lists = ([], []) its = tee(irange(n)) for i in order: value = next(its[i]) lists[i].append(value) self.assertEqual(lists[0], list(range(n))) self.assertEqual(lists[1], list(range(n))) # test argument format checking self.assertRaises(TypeError, tee) self.assertRaises(TypeError, tee, 3) self.assertRaises(TypeError, tee, [1,2], 'x') self.assertRaises(TypeError, tee, [1,2], 3, 'x') # tee object should be instantiable a, b = tee('abc') c = type(a)('def') self.assertEqual(list(c), list('def')) # test long-lagged and multi-way split a, b, c = tee(range(2000), 3) for i in range(100): self.assertEqual(next(a), i) self.assertEqual(list(b), list(range(2000))) self.assertEqual([next(c), next(c)], list(range(2))) self.assertEqual(list(a), list(range(100,2000))) self.assertEqual(list(c), list(range(2,2000))) # test values of n self.assertRaises(TypeError, tee, 'abc', 'invalid') self.assertRaises(ValueError, tee, [], -1) for n in range(5): result = tee('abc', n) self.assertEqual(type(result), tuple) self.assertEqual(len(result), n) self.assertEqual([list(x) for x in result], [list('abc')]*n) # tee pass-through to copyable iterator a, b = tee('abc') c, d = tee(a) self.assertTrue(a is c) # test tee_new t1, t2 = tee('abc') tnew = type(t1) self.assertRaises(TypeError, tnew) 
self.assertRaises(TypeError, tnew, 10) t3 = tnew(t1) self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc')) # test that tee objects are weak referencable a, b = tee(range(10)) p = proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) del a support.gc_collect() self.assertRaises(ReferenceError, getattr, p, '__class__') ans = list('abc') long_ans = list(range(10000)) # check copy a, b = tee('abc') self.assertEqual(list(copy.copy(a)), ans) self.assertEqual(list(copy.copy(b)), ans) a, b = tee(list(range(10000))) self.assertEqual(list(copy.copy(a)), long_ans) self.assertEqual(list(copy.copy(b)), long_ans) # check partially consumed copy a, b = tee('abc') take(2, a) take(1, b) self.assertEqual(list(copy.copy(a)), ans[2:]) self.assertEqual(list(copy.copy(b)), ans[1:]) self.assertEqual(list(a), ans[2:]) self.assertEqual(list(b), ans[1:]) a, b = tee(range(10000)) take(100, a) take(60, b) self.assertEqual(list(copy.copy(a)), long_ans[100:]) self.assertEqual(list(copy.copy(b)), long_ans[60:]) self.assertEqual(list(a), long_ans[100:]) self.assertEqual(list(b), long_ans[60:]) # check deepcopy a, b = tee('abc') self.assertEqual(list(copy.deepcopy(a)), ans) self.assertEqual(list(copy.deepcopy(b)), ans) self.assertEqual(list(a), ans) self.assertEqual(list(b), ans) a, b = tee(range(10000)) self.assertEqual(list(copy.deepcopy(a)), long_ans) self.assertEqual(list(copy.deepcopy(b)), long_ans) self.assertEqual(list(a), long_ans) self.assertEqual(list(b), long_ans) # check partially consumed deepcopy a, b = tee('abc') take(2, a) take(1, b) self.assertEqual(list(copy.deepcopy(a)), ans[2:]) self.assertEqual(list(copy.deepcopy(b)), ans[1:]) self.assertEqual(list(a), ans[2:]) self.assertEqual(list(b), ans[1:]) a, b = tee(range(10000)) take(100, a) take(60, b) self.assertEqual(list(copy.deepcopy(a)), long_ans[100:]) self.assertEqual(list(copy.deepcopy(b)), long_ans[60:]) self.assertEqual(list(a), long_ans[100:]) self.assertEqual(list(b), long_ans[60:]) # check pickle 
self.pickletest(iter(tee('abc'))) a, b = tee('abc') self.pickletest(a, compare=ans) self.pickletest(b, compare=ans) # Issue 13454: Crash when deleting backward iterator from tee() def test_tee_del_backward(self): forward, backward = tee(repeat(None, 20000000)) any(forward) # exhaust the iterator del backward def test_StopIteration(self): self.assertRaises(StopIteration, next, zip()) for f in (chain, cycle, zip, groupby): self.assertRaises(StopIteration, next, f([])) self.assertRaises(StopIteration, next, f(StopNow())) self.assertRaises(StopIteration, next, islice([], None)) self.assertRaises(StopIteration, next, islice(StopNow(), None)) p, q = tee([]) self.assertRaises(StopIteration, next, p) self.assertRaises(StopIteration, next, q) p, q = tee(StopNow()) self.assertRaises(StopIteration, next, p) self.assertRaises(StopIteration, next, q) self.assertRaises(StopIteration, next, repeat(None, 0)) for f in (filter, filterfalse, map, takewhile, dropwhile, starmap): self.assertRaises(StopIteration, next, f(lambda x:x, [])) self.assertRaises(StopIteration, next, f(lambda x:x, StopNow())) class TestExamples(unittest.TestCase): def test_accumulate(self): self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15]) def test_accumulate_reducible(self): # check copy, deepcopy, pickle data = [1, 2, 3, 4, 5] accumulated = [1, 3, 6, 10, 15] it = accumulate(data) self.assertEqual(list(pickle.loads(pickle.dumps(it))), accumulated[:]) self.assertEqual(next(it), 1) self.assertEqual(list(pickle.loads(pickle.dumps(it))), accumulated[1:]) self.assertEqual(list(copy.deepcopy(it)), accumulated[1:]) self.assertEqual(list(copy.copy(it)), accumulated[1:]) def test_chain(self): self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF') def test_chain_from_iterable(self): self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF') def test_combinations(self): self.assertEqual(list(combinations('ABCD', 2)), [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) 
self.assertEqual(list(combinations(range(4), 3)), [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) def test_combinations_with_replacement(self): self.assertEqual(list(combinations_with_replacement('ABC', 2)), [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) def test_compress(self): self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) def test_count(self): self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14]) def test_cycle(self): self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD')) def test_dropwhile(self): self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1]) def test_groupby(self): self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')], list('ABCDAB')) self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')], [list('AAAA'), list('BBB'), list('CC'), list('D')]) def test_filter(self): self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9]) def test_filterfalse(self): self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8]) def test_map(self): self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000]) def test_islice(self): self.assertEqual(list(islice('ABCDEFG', 2)), list('AB')) self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD')) self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG')) self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG')) def test_zip(self): self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')]) def test_zip_longest(self): self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')), [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')]) def test_permutations(self): self.assertEqual(list(permutations('ABCD', 2)), list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))) self.assertEqual(list(permutations(range(3))), [(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)]) def test_product(self): self.assertEqual(list(product('ABCD', 'xy')), list(map(tuple, 'Ax Ay Bx By Cx Cy 
Dx Dy'.split()))) self.assertEqual(list(product(range(2), repeat=3)), [(0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1)]) def test_repeat(self): self.assertEqual(list(repeat(10, 3)), [10, 10, 10]) def test_stapmap(self): self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])), [32, 9, 1000]) def test_takewhile(self): self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4]) class TestGC(unittest.TestCase): def makecycle(self, iterator, container): container.append(iterator) next(iterator) del container, iterator def test_accumulate(self): a = [] self.makecycle(accumulate([1,2,a,3]), a) def test_chain(self): a = [] self.makecycle(chain(a), a) def test_chain_from_iterable(self): a = [] self.makecycle(chain.from_iterable([a]), a) def test_combinations(self): a = [] self.makecycle(combinations([1,2,a,3], 3), a) def test_combinations_with_replacement(self): a = [] self.makecycle(combinations_with_replacement([1,2,a,3], 3), a) def test_compress(self): a = [] self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a) def test_count(self): a = [] Int = type('Int', (int,), dict(x=a)) self.makecycle(count(Int(0), Int(1)), a) def test_cycle(self): a = [] self.makecycle(cycle([a]*2), a) def test_dropwhile(self): a = [] self.makecycle(dropwhile(bool, [0, a, a]), a) def test_groupby(self): a = [] self.makecycle(groupby([a]*2, lambda x:x), a) def test_issue2246(self): # Issue 2246 -- the _grouper iterator was not included in GC n = 10 keyfunc = lambda x: x for i, j in groupby(range(n), key=keyfunc): keyfunc.__dict__.setdefault('x',[]).append(j) def test_filter(self): a = [] self.makecycle(filter(lambda x:True, [a]*2), a) def test_filterfalse(self): a = [] self.makecycle(filterfalse(lambda x:False, a), a) def test_zip(self): a = [] self.makecycle(zip([a]*2, [a]*3), a) def test_zip_longest(self): a = [] self.makecycle(zip_longest([a]*2, [a]*3), a) b = [a, None] self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a) def test_map(self): a = [] 
self.makecycle(map(lambda x:x, [a]*2), a) def test_islice(self): a = [] self.makecycle(islice([a]*2, None), a) def test_permutations(self): a = [] self.makecycle(permutations([1,2,a,3], 3), a) def test_product(self): a = [] self.makecycle(product([1,2,a,3], repeat=3), a) def test_repeat(self): a = [] self.makecycle(repeat(a), a) def test_starmap(self): a = [] self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a) def test_takewhile(self): a = [] self.makecycle(takewhile(bool, [1, 0, a, a]), a) def R(seqn): 'Regular generator' for i in seqn: yield i class G: 'Sequence using __getitem__' def __init__(self, seqn): self.seqn = seqn def __getitem__(self, i): return self.seqn[i] class I: 'Sequence using iterator protocol' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self def __next__(self): if self.i >= len(self.seqn): raise StopIteration v = self.seqn[self.i] self.i += 1 return v class Ig: 'Sequence using iterator protocol defined with a generator' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): for val in self.seqn: yield val class X: 'Missing __getitem__ and __iter__' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __next__(self): if self.i >= len(self.seqn): raise StopIteration v = self.seqn[self.i] self.i += 1 return v class N: 'Iterator missing __next__()' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self class E: 'Test propagation of exceptions' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self def __next__(self): 3 // 0 class S: 'Test immediate stop' def __init__(self, seqn): pass def __iter__(self): return self def __next__(self): raise StopIteration def L(seqn): 'Test multiple tiers of iterators' return chain(map(lambda x:x, R(Ig(G(seqn))))) class TestVariousIteratorArgs(unittest.TestCase): def test_accumulate(self): s = [1,2,3,4,5] r = [1,3,6,10,15] n = len(s) for g in (G, I, Ig, L, R): 
self.assertEqual(list(accumulate(g(s))), r) self.assertEqual(list(accumulate(S(s))), []) self.assertRaises(TypeError, accumulate, X(s)) self.assertRaises(TypeError, accumulate, N(s)) self.assertRaises(ZeroDivisionError, list, accumulate(E(s))) def test_chain(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(chain(g(s))), list(g(s))) self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s))) self.assertRaises(TypeError, list, chain(X(s))) self.assertRaises(TypeError, list, chain(N(s))) self.assertRaises(ZeroDivisionError, list, chain(E(s))) def test_compress(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): n = len(s) for g in (G, I, Ig, S, L, R): self.assertEqual(list(compress(g(s), repeat(1))), list(g(s))) self.assertRaises(TypeError, compress, X(s), repeat(1)) self.assertRaises(TypeError, compress, N(s), repeat(1)) self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1))) def test_product(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): self.assertRaises(TypeError, product, X(s)) self.assertRaises(TypeError, product, N(s)) self.assertRaises(ZeroDivisionError, product, E(s)) def test_cycle(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): tgtlen = len(s) * 3 expected = list(g(s))*3 actual = list(islice(cycle(g(s)), tgtlen)) self.assertEqual(actual, expected) self.assertRaises(TypeError, cycle, X(s)) self.assertRaises(TypeError, cycle, N(s)) self.assertRaises(ZeroDivisionError, list, cycle(E(s))) def test_groupby(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual([k for k, sb in groupby(g(s))], list(g(s))) self.assertRaises(TypeError, groupby, X(s)) self.assertRaises(TypeError, groupby, N(s)) self.assertRaises(ZeroDivisionError, list, groupby(E(s))) def test_filter(self): for s in (range(10), 
range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(filter(isEven, g(s))), [x for x in g(s) if isEven(x)]) self.assertRaises(TypeError, filter, isEven, X(s)) self.assertRaises(TypeError, filter, isEven, N(s)) self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s))) def test_filterfalse(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(filterfalse(isEven, g(s))), [x for x in g(s) if isOdd(x)]) self.assertRaises(TypeError, filterfalse, isEven, X(s)) self.assertRaises(TypeError, filterfalse, isEven, N(s)) self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s))) def test_zip(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(zip(g(s))), lzip(g(s))) self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s))) self.assertRaises(TypeError, zip, X(s)) self.assertRaises(TypeError, zip, N(s)) self.assertRaises(ZeroDivisionError, list, zip(E(s))) def test_ziplongest(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(zip_longest(g(s))), list(zip(g(s)))) self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s)))) self.assertRaises(TypeError, zip_longest, X(s)) self.assertRaises(TypeError, zip_longest, N(s)) self.assertRaises(ZeroDivisionError, list, zip_longest(E(s))) def test_map(self): for s in (range(10), range(0), range(100), (7,11), range(20,50,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(map(onearg, g(s))), [onearg(x) for x in g(s)]) self.assertEqual(list(map(operator.pow, g(s), g(s))), [x**x for x in g(s)]) self.assertRaises(TypeError, map, onearg, X(s)) self.assertRaises(TypeError, map, onearg, N(s)) self.assertRaises(ZeroDivisionError, list, map(onearg, E(s))) def test_islice(self): for s in ("12345", "", range(1000), ('do', 1.2), 
range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2]) self.assertRaises(TypeError, islice, X(s), 10) self.assertRaises(TypeError, islice, N(s), 10) self.assertRaises(ZeroDivisionError, list, islice(E(s), 10)) def test_starmap(self): for s in (range(10), range(0), range(100), (7,11), range(20,50,5)): for g in (G, I, Ig, S, L, R): ss = lzip(s, s) self.assertEqual(list(starmap(operator.pow, g(ss))), [x**x for x in g(s)]) self.assertRaises(TypeError, starmap, operator.pow, X(ss)) self.assertRaises(TypeError, starmap, operator.pow, N(ss)) self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss))) def test_takewhile(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): tgt = [] for elem in g(s): if not isEven(elem): break tgt.append(elem) self.assertEqual(list(takewhile(isEven, g(s))), tgt) self.assertRaises(TypeError, takewhile, isEven, X(s)) self.assertRaises(TypeError, takewhile, isEven, N(s)) self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s))) def test_dropwhile(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): tgt = [] for elem in g(s): if not tgt and isOdd(elem): continue tgt.append(elem) self.assertEqual(list(dropwhile(isOdd, g(s))), tgt) self.assertRaises(TypeError, dropwhile, isOdd, X(s)) self.assertRaises(TypeError, dropwhile, isOdd, N(s)) self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s))) def test_tee(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): it1, it2 = tee(g(s)) self.assertEqual(list(it1), list(g(s))) self.assertEqual(list(it2), list(g(s))) self.assertRaises(TypeError, tee, X(s)) self.assertRaises(TypeError, tee, N(s)) self.assertRaises(ZeroDivisionError, list, tee(E(s))[0]) class LengthTransparency(unittest.TestCase): @support.impl_detail("__length_hint__() API is 
undocumented") def test_repeat(self): from test.test_iterlen import len self.assertEqual(len(repeat(None, 50)), 50) self.assertRaises(TypeError, len, repeat(None)) class RegressionTests(unittest.TestCase): def test_sf_793826(self): # Fix Armin Rigo's successful efforts to wreak havoc def mutatingtuple(tuple1, f, tuple2): # this builds a tuple t which is a copy of tuple1, # then calls f(t), then mutates t to be equal to tuple2 # (needs len(tuple1) == len(tuple2)). def g(value, first=[1]): if first: del first[:] f(next(z)) return value items = list(tuple2) items[1:1] = list(tuple1) gen = map(g, items) z = zip(*[gen]*len(tuple1)) next(z) def f(t): global T T = t first[:] = list(T) first = [] mutatingtuple((1,2,3), f, (4,5,6)) second = list(T) self.assertEqual(first, second) def test_sf_950057(self): # Make sure that chain() and cycle() catch exceptions immediately # rather than when shifting between input sources def gen1(): hist.append(0) yield 1 hist.append(1) raise AssertionError hist.append(2) def gen2(x): hist.append(3) yield 2 hist.append(4) if x: raise StopIteration hist = [] self.assertRaises(AssertionError, list, chain(gen1(), gen2(False))) self.assertEqual(hist, [0,1]) hist = [] self.assertRaises(AssertionError, list, chain(gen1(), gen2(True))) self.assertEqual(hist, [0,1]) hist = [] self.assertRaises(AssertionError, list, cycle(gen1())) self.assertEqual(hist, [0,1]) class SubclassWithKwargsTest(unittest.TestCase): def test_keywords_in_subclass(self): # count is not subclassable... 
for cls in (repeat, zip, filter, filterfalse, chain, map, starmap, islice, takewhile, dropwhile, cycle, compress): class Subclass(cls): def __init__(self, newarg=None, *args): cls.__init__(self, *args) try: Subclass(newarg=1) except TypeError as err: # we expect type errors because of wrong argument count self.assertNotIn("does not take keyword arguments", err.args[0]) libreftest = """ Doctest for examples in the library reference: libitertools.tex >>> amounts = [120.15, 764.05, 823.14] >>> for checknum, amount in zip(count(1200), amounts): ... print('Check %d is for $%.2f' % (checknum, amount)) ... Check 1200 is for $120.15 Check 1201 is for $764.05 Check 1202 is for $823.14 >>> import operator >>> for cube in map(operator.pow, range(1,4), repeat(3)): ... print(cube) ... 1 8 27 >>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele'] >>> for name in islice(reportlines, 3, None, 2): ... print(name.title()) ... Alex Laura Martin Walter Samuele >>> from operator import itemgetter >>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3) >>> di = sorted(sorted(d.items()), key=itemgetter(1)) >>> for k, g in groupby(di, itemgetter(1)): ... print(k, list(map(itemgetter(0), g))) ... 1 ['a', 'c', 'e'] 2 ['b', 'd', 'f'] 3 ['g'] # Find runs of consecutive numbers using groupby. The key to the solution # is differencing with a range so that consecutive numbers all appear in # same group. >>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28] >>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]): ... print(list(map(operator.itemgetter(1), g))) ... [1] [4, 5, 6] [10] [15, 16, 17, 18] [22] [25, 26, 27, 28] >>> def take(n, iterable): ... "Return first n items of the iterable as a list" ... return list(islice(iterable, n)) >>> def enumerate(iterable, start=0): ... return zip(count(start), iterable) >>> def tabulate(function, start=0): ... "Return function(0), function(1), ..." ... 
return map(function, count(start)) >>> def nth(iterable, n, default=None): ... "Returns the nth item or a default value" ... return next(islice(iterable, n, None), default) >>> def quantify(iterable, pred=bool): ... "Count how many times the predicate is true" ... return sum(map(pred, iterable)) >>> def padnone(iterable): ... "Returns the sequence elements and then returns None indefinitely" ... return chain(iterable, repeat(None)) >>> def ncycles(iterable, n): ... "Returns the sequence elements n times" ... return chain(*repeat(iterable, n)) >>> def dotproduct(vec1, vec2): ... return sum(map(operator.mul, vec1, vec2)) >>> def flatten(listOfLists): ... return list(chain.from_iterable(listOfLists)) >>> def repeatfunc(func, times=None, *args): ... "Repeat calls to func with specified arguments." ... " Example: repeatfunc(random.random)" ... if times is None: ... return starmap(func, repeat(args)) ... else: ... return starmap(func, repeat(args, times)) >>> def pairwise(iterable): ... "s -> (s0,s1), (s1,s2), (s2, s3), ..." ... a, b = tee(iterable) ... try: ... next(b) ... except StopIteration: ... pass ... return zip(a, b) >>> def grouper(n, iterable, fillvalue=None): ... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx" ... args = [iter(iterable)] * n ... return zip_longest(*args, fillvalue=fillvalue) >>> def roundrobin(*iterables): ... "roundrobin('ABC', 'D', 'EF') --> A D E B F C" ... # Recipe credited to George Sakkis ... pending = len(iterables) ... nexts = cycle(iter(it).__next__ for it in iterables) ... while pending: ... try: ... for next in nexts: ... yield next() ... except StopIteration: ... pending -= 1 ... nexts = cycle(islice(nexts, pending)) >>> def powerset(iterable): ... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" ... s = list(iterable) ... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1)) >>> def unique_everseen(iterable, key=None): ... "List unique elements, preserving order. Remember all elements ever seen." 
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D ... # unique_everseen('ABBCcAD', str.lower) --> A B C D ... seen = set() ... seen_add = seen.add ... if key is None: ... for element in iterable: ... if element not in seen: ... seen_add(element) ... yield element ... else: ... for element in iterable: ... k = key(element) ... if k not in seen: ... seen_add(k) ... yield element >>> def unique_justseen(iterable, key=None): ... "List unique elements, preserving order. Remember only the element just seen." ... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B ... # unique_justseen('ABBCcAD', str.lower) --> A B C A D ... return map(next, map(itemgetter(1), groupby(iterable, key))) This is not part of the examples but it tests to make sure the definitions perform as purported. >>> take(10, count()) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> list(enumerate('abc')) [(0, 'a'), (1, 'b'), (2, 'c')] >>> list(islice(tabulate(lambda x: 2*x), 4)) [0, 2, 4, 6] >>> nth('abcde', 3) 'd' >>> nth('abcde', 9) is None True >>> quantify(range(99), lambda x: x%2==0) 50 >>> a = [[1, 2, 3], [4, 5, 6]] >>> flatten(a) [1, 2, 3, 4, 5, 6] >>> list(repeatfunc(pow, 5, 2, 3)) [8, 8, 8, 8, 8] >>> import random >>> take(5, map(int, repeatfunc(random.random))) [0, 0, 0, 0, 0] >>> list(pairwise('abcd')) [('a', 'b'), ('b', 'c'), ('c', 'd')] >>> list(pairwise([])) [] >>> list(pairwise('a')) [] >>> list(islice(padnone('abc'), 0, 6)) ['a', 'b', 'c', None, None, None] >>> list(ncycles('abc', 3)) ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'] >>> dotproduct([1,2,3], [4,5,6]) 32 >>> list(grouper(3, 'abcdefg', 'x')) [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')] >>> list(roundrobin('abc', 'd', 'ef')) ['a', 'd', 'e', 'b', 'f', 'c'] >>> list(powerset([1,2,3])) [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] >>> all(len(list(powerset(range(n)))) == 2**n for n in range(18)) True >>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len) True >>> 
list(unique_everseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D'] >>> list(unique_everseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'D'] >>> list(unique_justseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D', 'A', 'B'] >>> list(unique_justseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'A', 'D'] """ __test__ = {'libreftest' : libreftest} def test_main(verbose=None): test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC, RegressionTests, LengthTransparency, SubclassWithKwargsTest, TestExamples) support.run_unittest(*test_classes) # verify reference counting if verbose and hasattr(sys, "gettotalrefcount"): import gc counts = [None] * 5 for i in range(len(counts)): support.run_unittest(*test_classes) gc.collect() counts[i] = sys.gettotalrefcount() print(counts) # doctest the examples in the library reference support.run_doctest(sys.modules[__name__], verbose) if __name__ == "__main__": test_main(verbose=True)
timm/timmnix
pypy3-v5.5.0-linux64/lib-python/3/test/test_itertools.py
Python
mit
83,293
[ "GULP" ]
a3f9f271e5c45f672325b49a4fc66dbaf80c9faab657b07b053e3ddd2d809967
""" Adds Skip and Timeout guards Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>, Brian Vinter <vinter@nbi.dk>, Rune M. Friborg <rune.m.friborg@gmail.com>. See LICENSE.txt for licensing details (MIT License). """ # Imports import threading import uuid from pycsp.parallel.const import * from pycsp.parallel.protocol import AddrID, ChannelReq, LockMessenger from pycsp.parallel.dispatch import SocketDispatcher from pycsp.parallel.exceptions import * # Classes class Guard(object): """ The empty interface of a guard. """ def __init__(self, action=None): self.g = (self, action) # Id similar to channel name, to correctly select the chosen guard among the guard set. self.id = uuid.uuid1().hex self.id = self.id.encode() # Necessary to allow for correct locking self.dispatch = SocketDispatcher().getThread() self.dispatch.registerGuard(self.id) self.LM = LockMessenger(self.id) def _offer(self, req): try: # Acquire lock conn, state, seq = self.LM.remote_acquire_and_get_state(req.process) # Check sequence number if seq != req.seq_check: state = FAIL # Success? if (state == READY): self.LM.remote_notify(conn, req.process, req.ch_id, None) # Release lock self.LM.remote_release(conn, req.process) except AddrUnavailableException: # Unable to reach process during offer # The primary reason is probably because a request were part of an alting and the process have exited. if conf.get(SOCKETS_STRICT_MODE): raise FatalException("PyCSP unable to reach process during Guard.offer(%s)" % str(self.process)) else: sys.stderr.write("PyCSP unable to reach process during Guard.offer(%s)\n" % str(self.process)) def _close(self): # Invoked from Alternation self.dispatch.deregisterGuard(self.id) class SkipGuard(Guard): """ SkipGuard(action=None) SkipGuard will commit a successful communication the moment it is posted. 
Usage: >>> C = Channel() >>> Cin = C.reader() >>> (g, msg) = AltSelect(InputGuard(Cin), SkipGuard() ) SkipGuard(action=None) action An action may be provided as a string, a callable function object or a Choice object. The Choice object is the recommended use of action. A string: >>> action="L.append(channel_input)" The string passed to the action parameter is evaluted in the current namespace and can read variables, but can only write output by editing the content of existing mutable variables. Newly created immutable and mutable variables will only exist in the evalutation of this string. callable(func): >>> def func(channel_input=None) ... L.append(channel_input) >>> action=func The callable function object must accept one parameter for actions on InputGuards and must accept zero parameters for actions on OutputGuards. Choice: >>> @choice ... def func(L, channel_input=None) ... L.append(channel_input) >>> action=func(gatherList) The choice decorator can be used to make a Choice factory, which can generate actions with different parameters depending on the use case. See help(pycsp.choice) """ def __init__(self, action=None): Guard.__init__(self, action) # Offer instantly def _post_read(self, process, ack=False): proc_addr_id = AddrID(process.addr, process.id) self._offer(ChannelReq(self.LM, proc_addr_id, process.sequence_number, self.id)) # Send acknowledgement to process. (used to ensure prioritized select) if ack: self.LM.ack(proc_addr_id) def _post_write(self, process, msg, ask=False): raise InfoException("Can not use SkipGuard with msg") class TimeoutGuard(Guard): """ TimeoutGuard(seconds, action=None) TimeoutGuard spawns a timer thread, when posted. If removed before timeout, then the timer thread is cancelled. When the timer expires, the timer thread will commit a successful communication. 
Usage: >>> C = Channel() >>> Cin = C.reader() >>> (g, msg) = AltSelect( InputGuard(Cin), TimeoutGuard(seconds=0.5) ) TimeoutGuard(action=None) seconds Set the seconds to wait before timeout. eg. 0.5s action An action may be provided as a string, a callable function object or a Choice object. The Choice object is the recommended use of action. A string: >>> action="L.append(channel_input)" The string passed to the action parameter is evaluted in the current namespace and can read variables, but can only write output by editing the content of existing mutable variables. Newly created immutable and mutable variables will only exist in the evalutation of this string. callable(func): >>> def func(channel_input=None) ... L.append(channel_input) >>> action=func The callable function object must accept one parameter for actions on InputGuards and must accept zero parameters for actions on OutputGuards. Choice: >>> @choice ... def func(L, channel_input=None) ... L.append(channel_input) >>> action=func(gatherList) The choice decorator can be used to make a Choice factory, which can generate actions with different parameters depending on the use case. See help(pycsp.choice) """ def __init__(self, seconds, action=None): Guard.__init__(self, action) self.seconds = seconds self.posted_req = None self.timer_cancelled=False self.lock = threading.Lock() # Timer expired, offer an active Channel Request def _expire(self): self.lock.acquire() if not self.timer_cancelled: self._offer(self.posted_req) self.lock.release() def _post_read(self, process, ack=False): proc_addr_id = AddrID(process.addr, process.id) # Send acknowledgement to process. 
(used to ensure prioritized select) if ack: self.LM.ack(proc_addr_id) self.posted_req = ChannelReq(self.LM, proc_addr_id, process.sequence_number, self.id) self.timer = threading.Timer(self.seconds, self._expire) self.timer.start() def _close(self): self.lock.acquire() self.timer_cancelled=True Guard._close(self) self.lock.release() # Backwards compatibility Skip = SkipGuard Timeout = TimeoutGuard
runefriborg/pycsp
pycsp/parallel/guard.py
Python
mit
7,041
[ "Brian" ]
9dadfa8e7a8806042e9e4c8e883cd8efbbcc16e925cceb1c2001a92d92beaa74
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import logging import time import traceback import contextlib from collections import Mapping from distutils.version import LooseVersion from ..conventions import cf_encoder from ..core.utils import FrozenOrderedDict from ..core.pycompat import iteritems, dask_array_type try: from dask.utils import SerializableLock as Lock except ImportError: from threading import Lock # Create a logger object, but don't add any handlers. Leave that to user code. logger = logging.getLogger(__name__) NONE_VAR_NAME = '__values__' # dask.utils.SerializableLock if available, otherwise just a threading.Lock GLOBAL_LOCK = Lock() def _encode_variable_name(name): if name is None: name = NONE_VAR_NAME return name def _decode_variable_name(name): if name == NONE_VAR_NAME: name = None return name def find_root(ds): """ Helper function to find the root of a netcdf or h5netcdf dataset. """ while ds.parent is not None: ds = ds.parent return ds def robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500): """ Robustly index an array, using retry logic with exponential backoff if any of the errors ``catch`` are raised. The initial_delay is measured in ms. With the default settings, the maximum delay will be in the range of 32-64 seconds. """ assert max_retries >= 0 for n in range(max_retries + 1): try: return array[key] except catch: if n == max_retries: raise base_delay = initial_delay * 2 ** n next_delay = base_delay + np.random.randint(base_delay) msg = ('getitem failed, waiting %s ms before trying again ' '(%s tries remaining). 
Full traceback: %s' % (next_delay, max_retries - n, traceback.format_exc())) logger.debug(msg) time.sleep(1e-3 * next_delay) class AbstractDataStore(Mapping): _autoclose = False def __iter__(self): return iter(self.variables) def __getitem__(self, key): return self.variables[key] def __len__(self): return len(self.variables) def get_attrs(self): # pragma: no cover raise NotImplementedError def get_variables(self): # pragma: no cover raise NotImplementedError def get_encoding(self): return {} def load(self): """ This loads the variables and attributes simultaneously. A centralized loading function makes it easier to create data stores that do automatic encoding/decoding. For example: class SuffixAppendingDataStore(AbstractDataStore): def load(self): variables, attributes = AbstractDataStore.load(self) variables = {'%s_suffix' % k: v for k, v in iteritems(variables)} attributes = {'%s_suffix' % k: v for k, v in iteritems(attributes)} return variables, attributes This function will be called anytime variables or attributes are requested, so care should be taken to make sure its fast. """ variables = FrozenOrderedDict((_decode_variable_name(k), v) for k, v in self.get_variables().items()) attributes = FrozenOrderedDict(self.get_attrs()) return variables, attributes @property def variables(self): # Because encoding/decoding might happen which may require both the # attributes and the variables, and because a store may be updated # we need to load both the attributes and variables # anytime either one is requested. variables, _ = self.load() return variables @property def attrs(self): # Because encoding/decoding might happen which may require both the # attributes and the variables, and because a store may be updated # we need to load both the attributes and variables # anytime either one is requested. 
_, attributes = self.load() return attributes @property def dimensions(self): return self.get_dimensions() def close(self): pass def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.close() class ArrayWriter(object): def __init__(self): self.sources = [] self.targets = [] def add(self, source, target): if isinstance(source, dask_array_type): self.sources.append(source) self.targets.append(target) else: try: target[...] = source except TypeError: # workaround for GH: scipy/scipy#6880 target[:] = source def sync(self): if self.sources: import dask.array as da import dask if LooseVersion(dask.__version__) > LooseVersion('0.8.1'): da.store(self.sources, self.targets, lock=GLOBAL_LOCK) else: da.store(self.sources, self.targets) self.sources = [] self.targets = [] class AbstractWritableDataStore(AbstractDataStore): def __init__(self, writer=None): if writer is None: writer = ArrayWriter() self.writer = writer def set_dimension(self, d, l): # pragma: no cover raise NotImplementedError def set_attribute(self, k, v): # pragma: no cover raise NotImplementedError def set_variable(self, k, v): # pragma: no cover raise NotImplementedError def sync(self): self.writer.sync() def store_dataset(self, dataset): # in stores variables are all variables AND coordinates # in xarray.Dataset variables are variables NOT coordinates, # so here we pass the whole dataset in instead of doing # dataset.variables self.store(dataset, dataset.attrs) def store(self, variables, attributes, check_encoding_set=frozenset(), unlimited_dims=None): self.set_attributes(attributes) self.set_variables(variables, check_encoding_set, unlimited_dims=unlimited_dims) def set_attributes(self, attributes): for k, v in iteritems(attributes): self.set_attribute(k, v) def set_variables(self, variables, check_encoding_set, unlimited_dims=None): for vn, v in iteritems(variables): name = _encode_variable_name(vn) check = vn in check_encoding_set target, source = 
self.prepare_variable( name, v, check, unlimited_dims=unlimited_dims) self.writer.add(source, target) def set_necessary_dimensions(self, variable, unlimited_dims=None): if unlimited_dims is None: unlimited_dims = set() for d, l in zip(variable.dims, variable.shape): if d not in self.dimensions: if d in unlimited_dims: l = None self.set_dimension(d, l) class WritableCFDataStore(AbstractWritableDataStore): def store(self, variables, attributes, *args, **kwargs): # All NetCDF files get CF encoded by default, without this attempting # to write times, for example, would fail. cf_variables, cf_attrs = cf_encoder(variables, attributes) AbstractWritableDataStore.store(self, cf_variables, cf_attrs, *args, **kwargs) class DataStorePickleMixin(object): """Subclasses must define `ds`, `_opener` and `_mode` attributes. Do not subclass this class: it is not part of xarray's external API. """ def __getstate__(self): state = self.__dict__.copy() del state['ds'] if self._mode == 'w': # file has already been created, don't override when restoring state['_mode'] = 'a' return state def __setstate__(self, state): self.__dict__.update(state) self.ds = self._opener(mode=self._mode) @contextlib.contextmanager def ensure_open(self, autoclose): """ Helper function to make sure datasets are closed and opened at appropriate times to avoid too many open file errors. Use requires `autoclose=True` argument to `open_mfdataset`. """ if self._autoclose and not self._isopen: try: self.ds = self._opener() self._isopen = True yield finally: if autoclose: self.close() else: yield def assert_open(self): if not self._isopen: raise AssertionError('internal failure: file must be open ' 'if `autoclose=True` is used.')
jhamman/xray
xarray/backends/common.py
Python
apache-2.0
9,144
[ "NetCDF" ]
f2c4e203b62a1ef24e402a489af9d4c1ffeb745a7e2fa39fed8f88afcfc5d4da
# th.po val = {" days." : "", "(all)" : "", "(any)" : "", "(anyone)" : "(ไม่มี)", "(available)" : "", "(blank)" : "", "(both)" : "", "(everyone)" : "", "(master user, not editable)" : "", "(no change)" : "", "(no deduction)" : "", "(none)" : "(ไม่มี)", "(unknown)" : "", "(use system)" : "", "({0} given, {1} remaining)" : "", "1 treatment" : "", "1 week" : "1 สัปดาห์", "1 year" : "", "2 weeks" : "2 สัปดาห์", "3 months" : "เดือน", "4 weeks" : "4 สัปดาห์", "5 Year" : "", "6 months" : "เดือน", "6 weeks" : "6 สัปดาห์", "8 weeks" : "8 สัปดาห์", "9 months" : "เดือน", "A (Stray Dog)" : "", "A description or other information about the animal" : "", "A list of areas this person will homecheck - eg: S60 S61" : "", "A movement must have a reservation date or type." : "", "A person is required for this movement type." : "", "A publish job is already running." : "", "A short version of the reference number" : "", "A task is already running." : "", "A unique number to identify this movement" : "", "A unique reference for this litter" : "", "A4" : "", "ACO" : "", "AM" : "", "ASM" : "", "ASM 3 is compatible with your iPad and other tablets." : "", "ASM News" : "", "ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "", "ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "", "ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "", "Abandoned" : "", "Abuse" : "", "Abyssinian" : "", "Access System Menu" : "", "Account" : "", "Account Types" : "", "Account code '{0}' has already been used." : "", "Account code '{0}' is not valid." : "", "Account code cannot be blank." : "", "Account disabled." : "", "Accountant" : "", "Accounts" : "", "Accounts need a code." 
: "", "Active" : "", "Active Incidents" : "", "Active Trap Loans" : "", "Active users: {0}" : "", "Add" : "", "Add Accounts" : "", "Add Animal" : "เพิ่มสัตว์", "Add Animals" : "เพิ่มสัตว์", "Add Appointment" : "", "Add Call" : "", "Add Citations" : "", "Add Clinic Appointment" : "", "Add Cost" : "", "Add Diary" : "", "Add Diets" : "", "Add Document to Repository" : "", "Add Flag" : "", "Add Found Animal" : "สัตว์ที่พบเจอ", "Add Incidents" : "", "Add Investigation" : "", "Add Invoice Item" : "", "Add Licenses" : "", "Add Litter" : "", "Add Log" : "", "Add Log to Animal" : "สัตว์ที่พบเจอ", "Add Lost Animal" : "สัตว์ที่พบเจอ", "Add Media" : "", "Add Medical Records" : "", "Add Message" : "", "Add Movement" : "", "Add Payments" : "", "Add Person" : "", "Add Report" : "", "Add Rota" : "", "Add Stock" : "", "Add Tests" : "", "Add Transport" : "", "Add Trap Loans" : "", "Add Users" : "", "Add Vaccinations" : "", "Add Vouchers" : "", "Add Waiting List" : "ดูรายชื่อที่กำลังรอ", "Add a diary note" : "", "Add a found animal" : "สัตว์ที่พบเจอ", "Add a log entry" : "", "Add a lost animal" : "สัตว์ที่พบเจอ", "Add a medical regimen" : "", "Add a new animal" : "สัตว์ที่พบเจอ", "Add a new log" : "", "Add a new person" : "", "Add a person" : "", "Add a photo" : "", "Add a test" : "", "Add a vaccination" : "", "Add account" : "", "Add additional field" : "", "Add an animal to the waiting list" : "", "Add citation" : "", "Add cost" : "", "Add details of this email to the log after sending" : "", "Add diary" : "", "Add diary task" : "", "Add diet" : "", "Add extra images for use in reports and documents" : "", "Add form field" : "", "Add found animal" : "สัตว์ที่พบเจอ", "Add investigation" : "", "Add license" : "", "Add litter" : "", "Add log" : "", "Add lost animal" : "สัตว์ที่พบเจอ", "Add medical profile" : "", "Add medical regimen" : "", "Add message" : "", "Add movement" : "", "Add online form" : "", "Add payment" : "", "Add person" : "", "Add report" : "", "Add role" : "", "Add 
rota item" : "", "Add stock" : "", "Add template" : "", "Add test" : "", "Add this text to all animal descriptions" : "", "Add to log" : "", "Add transport" : "", "Add trap loan" : "", "Add user" : "", "Add vaccination" : "", "Add voucher" : "", "Add waiting list" : "ดูรายชื่อที่กำลังรอ", "Add {0}" : "", "Added" : "", "Added by {0} on {1}" : "", "Additional" : "เพิ่มเติม", "Additional Fields" : "", "Additional date field '{0}' contains an invalid date." : "", "Additional fields" : "", "Additional fields need a name, label and type." : "", "Address" : "ที่อยู่", "Address Contains" : "", "Address contains" : "", "Administered" : "", "Administering Vet" : "", "Adopt" : "", "Adopt an animal" : "", "Adoptable" : "", "Adoptable Animal" : "", "Adoptable and published for the first time" : "", "Adopted" : "", "Adopted Animals" : "เพิ่มสัตว์", "Adopted Transferred In {0}" : "", "Adoption" : "", "Adoption Coordinator" : "", "Adoption Coordinator and Fosterer" : "", "Adoption Event" : "", "Adoption Fee" : "", "Adoption Number" : "", "Adoption fee donations" : "", "Adoption movements must have a valid adoption date." : "", "Adoption successfully created." : "", "Adoptions {0}" : "", "Adult" : "", "Advanced" : "อย่างขั้นสูง", "Advanced find animal screen defaults to on shelter" : "", "Affenpinscher" : "", "Afghan Hound" : "", "African Grey" : "", "After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "", "Age" : "อายุ", "Age Group" : "", "Age Group 1" : "", "Age Group 2" : "", "Age Group 3" : "", "Age Group 4" : "", "Age Group 5" : "", "Age Group 6" : "", "Age Group 7" : "", "Age Group 8" : "", "Age Groups" : "", "Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." 
: "", "Aged Between" : "", "Aged From" : "", "Aged To" : "", "Aggression" : "", "Airedale Terrier" : "", "Akbash" : "", "Akita" : "", "Alaskan Malamute" : "", "Alerts" : "", "All Animals" : "เพิ่มสัตว์", "All On-Shelter Animals" : "ลบสัตว์", "All Publishers" : "", "All accounts" : "", "All animal care officers on file." : "", "All animal shelters on file." : "", "All animals matching current publishing options." : "", "All animals on the shelter." : "", "All animals where the hold ends today." : "", "All animals who are currently held in case of reclaim." : "", "All animals who are currently quarantined." : "", "All animals who are flagged as not for adoption." : "", "All animals who have been on the shelter longer than {0} months." : "", "All animals who have not been microchipped" : "", "All banned owners on file." : "", "All diary notes" : "", "All donors on file." : "", "All drivers on file." : "", "All existing data in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "", "All fields should be completed." : "", "All fosterers on file." : "", "All homechecked owners on file." : "", "All homecheckers on file." : "", "All members on file." : "", "All notes upto today" : "", "All people on file." : "", "All retailers on file." : "", "All staff on file." : "", "All time" : "", "All vets on file." : "", "All volunteers on file." : "", "Allergies" : "", "Allow a fosterer to be selected" : "", "Allow an adoption coordinator to be selected" : "", "Allow creation of payments on the Move-Reserve screen" : "", "Allow drag and drop to move animals between locations" : "", "Allow duplicate license numbers" : "", "Allow duplicate microchip numbers" : "", "Allow overriding of the movement number on the Move menu screens" : "", "Allow use of OpenOffice document templates" : "", "Alphabetically A-Z" : "", "Alphabetically Z-A" : "", "Already Signed" : "", "Already fostered to this person." 
: "", "Altered" : "", "Altered Date" : "", "Altered Dog - 1 year" : "", "Altered Dog - 3 year" : "", "Altering Vet" : "", "Always show an emblem to indicate the current location" : "", "Amazon" : "", "Amber" : "", "American" : "อเมริกัน", "American Bulldog" : "", "American Curl" : "", "American Eskimo Dog" : "", "American Fuzzy Lop" : "", "American Sable" : "", "American Shorthair" : "", "American Staffordshire Terrier" : "", "American Water Spaniel" : "", "American Wirehair" : "", "Amount" : "", "An age in years, eg: 1, 0.5" : "", "An animal cannot have multiple open movements." : "", "An optional comma separated list of email addresses to send the output of this report to" : "", "Anatolian Shepherd" : "", "Angora Rabbit" : "", "Animal" : "สัตว์", "Animal '{0}' created with code {1}" : "", "Animal '{0}' successfully marked deceased." : "", "Animal (optional)" : "", "Animal (via animalname field)" : "", "Animal - Additional" : "", "Animal - Death" : "", "Animal - Details" : "", "Animal - Entry" : "", "Animal - Health and Identification" : "", "Animal - Notes" : "", "Animal Codes" : "ประเภทของสัตว์", "Animal Control" : "", "Animal Control Caller" : "", "Animal Control Incident" : "", "Animal Control Officer" : "", "Animal Control Victim" : "", "Animal Emblems" : "", "Animal Flags" : "", "Animal Links" : "", "Animal Name" : "ชื่อสัตว์", "Animal Selection" : "", "Animal Shelter Manager" : "", "Animal Shelter Manager Login" : "", "Animal Sponsorship" : "", "Animal Type" : "ประเภทของสัตว์", "Animal Types" : "ประเภทของสัตว์", "Animal board costs" : "", "Animal cannot be deceased before it was brought to the shelter" : "", "Animal code format" : "", "Animal comments MUST contain this phrase in order to match." : "", "Animal control calendar" : "", "Animal control incidents matching '{0}'." 
: "", "Animal defecation" : "", "Animal descriptions" : "", "Animal destroyed" : "", "Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "", "Animal food costs" : "", "Animal picked up" : "", "Animal shortcode format" : "", "Animals" : "สัตว์", "Animals at large" : "", "Animals left in vehicle" : "", "Animals matching '{0}'." : "", "Animals per page" : "", "Annual" : "รายปี", "Annually" : "รายปี", "Anonymize" : "", "Anonymize personal data after this many years" : "", "Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "", "Any health problems the animal has" : "", "Any information about the animal" : "", "Any markings or distinguishing features the animal has" : "", "Appaloosa" : "", "Appenzell Mountain Dog" : "", "Applehead Siamese" : "", "Appointment" : "", "Appointment date must be a valid date" : "", "Appointment {0}. {1} on {2} for {3}" : "", "Appointments need a date and time." : "", "Approved" : "", "Apr" : "เม.ย.", "April" : "เมษายน", "Arabian" : "", "Area" : "", "Area Found" : "เขตที่พบ", "Area Lost" : "", "Area Postcode" : "", "Area where the animal was found" : "", "Area where the animal was lost" : "", "Areas" : "", "Arrived" : "", "Asset" : "", "Asset::Premises" : "", "At least the last name should be completed." : "", "Attach" : "", "Attach File" : "", "Attach Link" : "", "Attach a file" : "", "Attach a link to a web resource" : "", "Attach link" : "", "Audit Trail" : "", "Aug" : "ส.ค.", "August" : "สิงหาคม", "Australian Cattle Dog/Blue Heeler" : "", "Australian Kelpie" : "", "Australian Shepherd" : "", "Australian Terrier" : "", "Auto log users out after this many minutes of inactivity" : "", "Auto removed due to lack of owner contact." 
: "", "Automatically cancel any outstanding reservations on an animal when it is adopted" : "", "Automatically remove" : "", "Automatically return any outstanding foster movements on an animal when it is adopted" : "", "Automatically return any outstanding foster movements on an animal when it is transferred" : "", "Available for adoption" : "", "Available sheltermanager.com reports" : "", "B (Boarding Animal)" : "", "Baby" : "", "Balance" : "", "Balinese" : "", "Bank" : "อันดับ", "Bank account interest" : "", "Bank current account" : "", "Bank deposit account" : "", "Bank savings account" : "", "Bank::Current" : "", "Bank::Deposit" : "", "Bank::Savings" : "", "Banned" : "", "Base Color" : "", "Basenji" : "", "Basset Hound" : "", "Batch" : "มีนาคม", "Batch Number" : "", "Beagle" : "", "Bearded Collie" : "", "Beauceron" : "", "Bedlington Terrier" : "", "Beginning of month" : "", "Belgian Hare" : "", "Belgian Shepherd Dog Sheepdog" : "", "Belgian Shepherd Laekenois" : "", "Belgian Shepherd Malinois" : "", "Belgian Shepherd Tervuren" : "", "Bengal" : "", "Bernese Mountain Dog" : "", "Beveren" : "", "Bichon Frise" : "", "Bird" : "นก", "Birman" : "", "Bite" : "ขนาด", "Biting" : "", "Black" : "ดำ", "Black Labrador Retriever" : "", "Black Mouth Cur" : "", "Black Tortie" : "", "Black and Brindle" : "", "Black and Brown" : "ดำและน้ำตาล", "Black and Tan" : "", "Black and Tan Coonhound" : "", "Black and White" : "ดำและขาว", "Bloodhound" : "", "Blue" : "น้ำเงิน", "Blue Tortie" : "", "Bluetick Coonhound" : "", "Board and Food" : "", "Boarding" : "", "Boarding Cost" : "", "Boarding cost type" : "", "Bobtail" : "", "Body" : "", "Bombay" : "", "Bonded" : "", "Bonded With" : "", "Books" : "", "Border Collie" : "", "Border Terrier" : "", "Bordetella" : "", "Born in Shelter" : "", "Born on Foster {0}" : "", "Born on Shelter {0}" : "", "Borzoi" : "", "Boston Terrier" : "", "Both" : "", "Bouvier des Flanders" : "", "Boxer" : "", "Boykin Spaniel" : "", "Breed" : "สายพันธุ์", "Breed to 
use when publishing to third party services and adoption sites" : "", "Breeds" : "สายพันธุ์", "Briard" : "", "Brindle" : "", "Brindle and Black" : "", "Brindle and White" : "", "Britannia Petite" : "", "British Shorthair" : "", "Brittany Spaniel" : "", "Brotogeris" : "", "Brought In" : "", "Brought In By" : "", "Brown" : "น้ำตาล", "Brown and Black" : "น้ำตาลและดำ", "Brown and White" : "น้ำตาลและขาว", "Browse sheltermanager.com" : "", "Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "", "Brussels Griffon" : "", "Budgie/Budgerigar" : "", "Bulk Complete Diary" : "", "Bulk Complete Medical Records" : "", "Bulk Complete Vaccinations" : "", "Bulk Complete Waiting List" : "", "Bulk Regimen" : "", "Bulk Test" : "", "Bulk Transport" : "", "Bulk Vaccination" : "", "Bulk change animals" : "", "Bull Terrier" : "", "Bullmastiff" : "", "Bunny Rabbit" : "", "Burmese" : "", "Burmilla" : "", "By" : "", "CC" : "", "CSV of animal/adopter data" : "", "CSV of animal/medical data" : "", "CSV of incident data" : "", "CSV of license data" : "", "CSV of payment data" : "", "CSV of person data" : "", "Caique" : "", "Cairn Terrier" : "", "Calendar View" : "", "Calendar view" : "", "Calico" : "", "Californian" : "", "Call" : "", "Call Date/Time" : "", "Caller" : "", "Caller Name" : "", "Caller Phone" : "", "Camel" : "", "Can Login" : "เข้าสู่ระบบ", "Can afford donation?" : "", "Can't reserve an animal that has an active movement." 
: "", "Canaan Dog" : "", "Canadian Hairless" : "", "Canary" : "", "Cancel" : "ยกเลิก", "Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "", "Cancel unadopted reservations after" : "", "Cancel unadopted reservations after this many days, or 0 to never cancel" : "", "Cancelled" : "", "Cancelled Reservation" : "", "Cane Corso Mastiff" : "", "Carolina Dog" : "", "Cash" : "", "Cat" : "แมว", "Catahoula Leopard Dog" : "", "Category" : "หมวดหมู่", "Cats" : "แมว", "Cattery" : "", "Cattle Dog" : "", "Cavalier King Charles Spaniel" : "", "Cell" : "", "Cell Phone" : "", "Champagne D'Argent" : "", "Change" : "", "Change Accounts" : "", "Change Animals" : "", "Change Citations" : "", "Change Clinic Apointment" : "", "Change Cost" : "", "Change Date Required" : "", "Change Diets" : "", "Change Found Animal" : "", "Change Incidents" : "", "Change Investigation" : "", "Change Licenses" : "", "Change Litter" : "", "Change Log" : "", "Change Lost Animal" : "", "Change Media" : "", "Change Medical Records" : "", "Change Movement" : "", "Change Password" : "", "Change Payments" : "", "Change Person" : "", "Change Publishing Options" : "", "Change Report" : "", "Change Rota" : "", "Change Stock" : "", "Change System Options" : "", "Change Tests" : "", "Change Transactions" : "", "Change Transport" : "", "Change Trap Loans" : "", "Change User Settings" : "", "Change Vaccinations" : "", "Change Vouchers" : "", "Change Waiting List" : "", "Change date required on selected treatments" : "", "Changed Mind" : "", "Chart" : "", "Chart (Bar)" : "", "Chart (Line)" : "", "Chart (Pie)" : "", "Chart (Point)" : "", "Chart (Steps)" : "", "Chartreux" : "", "Check" : "", "Check License" : "", "Check No" : "", "Checkbox" : "", "Checked By" : "", "Checkered Giant" : "", "Cheque" : "", "Chesapeake Bay Retriever" : "", "Chicken" : "", "Chihuahua" : "", "Children" : "", "Chinchilla" : "", "Chinese Crested Dog" : "", "Chinese Foo Dog" : "", "Chlamydophila" : "", 
"Chocolate" : "", "Chocolate Labrador Retriever" : "", "Chocolate Tortie" : "", "Chow Chow" : "", "Cinnamon" : "", "Cinnamon Tortoiseshell" : "", "Citation Type" : "", "Citation Types" : "", "Citations" : "", "City" : "", "City contains" : "", "Class" : "", "Clear" : "เคลียร์", "Clear and sign again" : "", "Clear tables before importing" : "", "Clinic" : "", "Clinic Calendar" : "", "Clinic Invoice - {0}" : "", "Clinic Statuses" : "", "Clone" : "ปิด", "Clone Animals" : "", "Clone Rota" : "", "Clone the rota this week to another week" : "", "Cloning..." : "กำลังจัดเรียง...", "Close" : "ปิด", "Clumber Spaniel" : "", "Clydesdale" : "", "Coat" : "แมว", "Coat Type" : "", "Coat Types" : "", "Cockapoo" : "", "Cockatiel" : "", "Cockatoo" : "", "Cocker Spaniel" : "", "Code" : "โค้ด", "Code contains" : "", "Code format tokens:" : "", "Collie" : "", "Color" : "", "Color to use when publishing to third party services and adoption sites" : "", "Colors" : "", "Columns" : "", "Columns displayed" : "", "Comma separated list of units for this location, eg: 1,2,3,4,Isolation,Pen 5" : "", "Comments" : "คอมเม้นต์", "Comments Contain" : "", "Comments contain" : "", "Comments copied to web preferred media." 
: "", "Complaint" : "", "Complete" : "เสร็จแล้ว", "Complete Tasks" : "", "Completed" : "เสร็จแล้ว", "Completed Between" : "", "Completed Type" : "", "Completed notes upto today" : "", "Completion Date" : "", "Completion Type" : "", "Configuration" : "", "Confirm" : "", "Confirm Password" : "", "Confirmation message" : "", "Confirmed" : "", "Consulting Room" : "", "Consulting Room - {0}" : "", "Consumed" : "", "Contact" : "ติดต่อ", "Contact Contains" : "", "Conure" : "", "Convert this reservation to an adoption" : "", "Coonhound" : "", "Copy animal comments to the notes field of the web preferred media for this animal" : "", "Copy from animal comments" : "", "Copy of {0}" : "สำเนาของ {0}", "Corded" : "", "Corgi" : "", "Cornish Rex" : "", "Cost" : "แมว", "Cost For" : "", "Cost Type" : "", "Cost Types" : "", "Cost date must be a valid date" : "", "Cost record" : "", "Costs" : "แมว", "Costs need a date and amount." : "", "Coton de Tulear" : "", "Could not find animal with name '{0}'" : "", "Country" : "ประเทศ:", "Courtesy Listing" : "", "Cow" : "", "Cream" : "ครีม", "Create" : "", "Create Animal" : "", "Create Log" : "", "Create Payment" : "สร้างโดย", "Create Waiting List" : "", "Create a cost record" : "", "Create a due or received payment record from this appointment" : "", "Create a new animal by copying this one" : "", "Create a new animal from this found animal record" : "", "Create a new animal from this incident" : "", "Create a new animal from this waiting list entry" : "", "Create a new document" : "", "Create a new template" : "", "Create a new template by copying the selected template" : "", "Create a new waiting list entry from this found animal record" : "", "Create and edit" : "", "Create boarding cost record when animal is adopted" : "", "Create diary notes from a task" : "", "Create missing lookup values" : "", "Create note this many days from today, or 9999 to ask" : "", "Create this message" : "", "Create this person" : "", "Created By" : "สร้างโดย", 
"Creating cost and cost types creates matching accounts and transactions" : "", "Creating payments and payments types creates matching accounts and transactions" : "", "Creating..." : "", "Credit Card" : "", "Creme D'Argent" : "", "Criteria:" : "", "Crossbreed" : "", "Cruelty Case" : "", "Culling" : "", "Curly" : "", "Current" : "", "Current Vet" : "", "Cymric" : "", "D (Dog)" : "", "DD = current day" : "", "DDL dump (DB2)" : "", "DDL dump (MySQL)" : "", "DDL dump (PostgreSQL)" : "", "DHLPP" : "", "DO NOT use this field to store notes about what the person is looking for." : "", "DOA {0}" : "", "DOB" : "", "Dachshund" : "", "Daily Boarding Cost" : "", "Dalmatian" : "", "Dandi Dinmont Terrier" : "", "Data" : "วันที่", "Data Protection" : "", "Database" : "ฐานข้อมูล", "Date" : "วันที่", "Date '{0}' is not valid." : "", "Date Brought In" : "", "Date Found" : "วันที่พบ", "Date Lost" : "", "Date Of Birth" : "", "Date Put On" : "", "Date Removed" : "วันที่เอาออก", "Date Reported" : "วันที่เอาออก", "Date and notes are mandatory." : "", "Date brought in cannot be blank" : "", "Date brought in cannot be in the future." : "", "Date brought in is not valid" : "", "Date found cannot be blank" : "", "Date found cannot be blank." : "", "Date lost cannot be blank" : "", "Date lost cannot be blank." : "", "Date of Birth" : "", "Date of birth cannot be blank" : "", "Date of birth cannot be in the future." : "", "Date of birth is not valid" : "", "Date of last owner contact" : "", "Date put on" : "", "Date put on cannot be blank" : "", "Date put on list" : "", "Date removed" : "วันที่เอาออก", "Date reported cannot be blank" : "", "Date reported cannot be blank." 
: "", "Date/Time" : "วันที่/เวลา", "Day" : "", "Day Pivot" : "", "Days On Shelter" : "", "Dead On Arrival" : "", "Dead animal" : "", "Dead on arrival" : "", "Death" : "", "Death Comments" : "", "Death Reason" : "เหตุผลที่เสียชีวิต", "Death Reasons" : "เหตุผลที่เสียชีวิต", "Debit Card" : "", "Dec" : "ธ.ค.", "Deceased" : "", "Deceased Date" : "", "December" : "ธันวาคม", "Declawed" : "", "Declined" : "", "Default Breed" : "", "Default Brought In By" : "", "Default Coat Type" : "", "Default Color" : "", "Default Cost" : "", "Default Death Reason" : "", "Default Diary Person" : "", "Default Entry Reason" : "", "Default Incident Type" : "", "Default Location" : "", "Default Log Filter" : "", "Default Log Type" : "", "Default Payment Method" : "", "Default Payment Type" : "", "Default Reservation Status" : "", "Default Return Reason" : "", "Default Rota Shift" : "", "Default Size" : "", "Default Species" : "", "Default Test Type" : "", "Default Type" : "", "Default Vaccination Type" : "", "Default Value" : "", "Default daily boarding cost" : "", "Default destination account for payments" : "", "Default image for documents" : "", "Default image for this record and the web" : "", "Default source account for costs" : "", "Default to advanced find animal screen" : "", "Default to advanced find person screen" : "", "Default transaction view" : "", "Default urgency" : "", "Default video for publishing" : "", "Default view" : "", "Defaults" : "ค่าปริยาย", "Defaults formats for code and shortcode are TYYYYNNN and NNT" : "", "Delete" : "ลบ", "Delete Accounts" : "", "Delete Animals" : "ลบสัตว์", "Delete Citations" : "", "Delete Clinic Appointment" : "", "Delete Cost" : "", "Delete Diary" : "", "Delete Diets" : "", "Delete Document from Repository" : "", "Delete Found Animal" : "", "Delete Incidents" : "", "Delete Incoming Forms" : "", "Delete Investigation" : "", "Delete Licenses" : "", "Delete Litter" : "", "Delete Log" : "", "Delete Lost Animal" : "", "Delete Media" : "", "Delete 
Medical Records" : "", "Delete Movement" : "", "Delete Payments" : "", "Delete Person" : "", "Delete Regimen" : "", "Delete Report" : "", "Delete Rota" : "", "Delete Stock" : "", "Delete Tests" : "", "Delete Transport" : "", "Delete Trap Loans" : "", "Delete Treatments" : "", "Delete Vaccinations" : "", "Delete Vouchers" : "", "Delete Waiting List" : "", "Delete all rota entries for this week" : "", "Delete this animal" : "", "Delete this incident" : "", "Delete this person" : "", "Delete this record" : "", "Delete this waiting list entry" : "", "Denied" : "", "Deposit" : "", "Deposit Account" : "", "Deposit Returned" : "", "Description" : "คำอธิบาย", "Description Contains" : "", "Description cannot be blank" : "", "Deselect" : "", "Details" : "รายละเอียด", "Devon Rex" : "", "Dialog title" : "", "Diary" : "", "Diary Task" : "", "Diary Task: {0}" : "", "Diary Tasks" : "", "Diary and Messages" : "", "Diary calendar" : "", "Diary date cannot be blank" : "", "Diary date is not valid" : "", "Diary for {0}" : "", "Diary note cannot be blank" : "", "Diary note {0} marked completed" : "", "Diary note {0} rediarised for {1}" : "", "Diary notes for: {0}" : "", "Diary notes need a date and subject." : "", "Diary subject cannot be blank" : "", "Diary task items need a pivot, subject and note." : "", "Diary tasks need a name." : "", "Did not ask" : "", "Did you know?" : "", "Died" : "", "Died off shelter" : "", "Died {0}" : "", "Diet" : "", "Diets" : "", "Diets need a start date." 
: "", "Dispatch" : "", "Dispatch Address" : "", "Dispatch Between" : "", "Dispatch Date/Time" : "", "Dispatch {0}: {1}" : "", "Dispatched ACO" : "", "Display" : "", "Display Index" : "", "Display a search button at the right side of the search box" : "", "Distemper" : "", "Do Not Publish" : "ไม่เผยแพร่", "Do Not Register Microchip" : "", "Do not show" : "", "Doberman Pinscher" : "", "Document" : "", "Document Link" : "", "Document Repository" : "", "Document Templates" : "", "Document file" : "", "Document signed" : "", "Document signing request" : "", "Document templates" : "", "Documents" : "", "Dog" : "สุนัข", "Dogo Argentino" : "", "Dogs" : "สุนัข", "Dogue de Bordeaux" : "", "Domestic Long Hair" : "", "Domestic Medium Hair" : "", "Domestic Short Hair" : "", "Don't create a cost record" : "", "Don't scale" : "", "Donated" : "", "Donation" : "บริจาค", "Donation?" : "บริจาคหรือเปล่า?", "Donations for animals entering the shelter" : "", "Done" : "เสร็จแล้ว", "Donkey" : "", "Donkey/Mule" : "", "Donor" : "", "Dosage" : "", "Dove" : "นกเขา", "Download" : "", "Draft" : "", "Driver" : "", "Drop files here..." 
: "", "Dropoff" : "", "Duck" : "เป็ด", "Due" : "", "Due in next month" : "", "Due in next week" : "", "Due in next year" : "", "Due today" : "", "Duration" : "ช่วงระยะ", "Dutch" : "", "Dutch Shepherd" : "", "Dwarf" : "", "Dwarf Eared" : "", "E = first letter of animal entry category" : "", "EE = first and second letter of animal entry category" : "", "Eclectus" : "", "Edit" : "แก้ไข", "Edit All Diary Notes" : "", "Edit Appointment" : "", "Edit Diary Tasks" : "", "Edit HTML publishing templates" : "", "Edit Header/Footer" : "", "Edit Invoice Item" : "", "Edit Lookups" : "", "Edit My Diary Notes" : "", "Edit Online Forms" : "", "Edit Reports" : "", "Edit Roles" : "", "Edit Users" : "", "Edit account" : "", "Edit additional field" : "", "Edit citation" : "", "Edit cost" : "", "Edit diary" : "", "Edit diary notes" : "", "Edit diary task" : "", "Edit diary tasks" : "", "Edit diet" : "", "Edit document" : "", "Edit form field" : "", "Edit investigation" : "", "Edit invoice" : "แก้ไขเจ้าของ", "Edit license" : "", "Edit litter" : "", "Edit litters" : "", "Edit log" : "", "Edit media notes" : "", "Edit medical profile" : "", "Edit medical regimen" : "", "Edit movement" : "", "Edit my diary notes" : "", "Edit my diary notes" : "", "Edit notes" : "", "Edit online form" : "", "Edit online form HTML header/footer" : "", "Edit payment" : "", "Edit report" : "", "Edit report template HTML header/footer" : "", "Edit role" : "", "Edit roles" : "", "Edit rota item" : "", "Edit stock" : "", "Edit system users" : "", "Edit template" : "", "Edit test" : "", "Edit the current waiting list" : "", "Edit transaction" : "", "Edit transport" : "", "Edit trap loan" : "", "Edit user" : "", "Edit vaccination" : "", "Edit voucher" : "", "Edit {0}" : "", "Egyptian Mau" : "", "Electricity Bills" : "", "Email" : "อีเมล์", "Email Address" : "", "Email PDF" : "", "Email Person" : "", "Email To" : "", "Email a copy of the selected HTML documents as PDFs" : "", "Email a copy of the selected media 
files" : "", "Email address" : "", "Email document for electronic signature" : "", "Email incident notes to ACO" : "", "Email incoming form submissions to this comma separated list of email addresses" : "", "Email media" : "", "Email person" : "", "Email signature" : "", "Email submissions to" : "", "Email this message to all matching users" : "", "Email this person" : "", "Email users their diary notes each day" : "", "Emu" : "อีมู", "Enable FTP uploading" : "", "Enable accounts functionality" : "", "Enable location filters" : "", "Enable lost and found functionality" : "", "Enable multiple sites" : "", "Enable the waiting list functionality" : "", "Enable visual effects" : "", "Enabled" : "", "End Of Day" : "", "End Time" : "", "End at" : "", "End of month" : "", "End of year" : "", "Ends" : "", "Ends after" : "", "English Bulldog" : "", "English Cocker Spaniel" : "", "English Coonhound" : "", "English Lop" : "", "English Pointer" : "", "English Setter" : "", "English Shepherd" : "", "English Spot" : "", "English Springer Spaniel" : "", "English Toy Spaniel" : "", "Entered (newest first)" : "", "Entered (oldest first)" : "", "Entered From" : "", "Entered To" : "", "Entered shelter" : "", "Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "", "Entering 'deceased' in the search box will show you recently deceased animals." : "", "Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "", "Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "", "Entering 'os' in the search box will show you all shelter animals." : "", "Entlebucher" : "", "Entry" : "", "Entry Category" : "", "Entry Donation" : "", "Entry Reason" : "", "Entry Reason Category" : "", "Entry Reasons" : "", "Entry reason" : "", "Error contacting server." 
: "", "Escaped" : "", "Escaped {0}" : "", "Eskimo Dog" : "", "Estimate" : "", "Euthanized" : "", "Euthanized {0}" : "", "Every day" : "", "Exclude animals who are aged under" : "", "Exclude from bulk email" : "", "Exclude new animal photos from publishing" : "", "Exclude this image when publishing" : "", "Execute" : "", "Execute Script" : "", "Execute the SQL in the box below" : "", "Executing Task" : "", "Executing..." : "", "Exotic Shorthair" : "", "Expense" : "", "Expense::" : "", "Expenses::Board" : "", "Expenses::Electricity" : "", "Expenses::Food" : "", "Expenses::Gas" : "", "Expenses::Phone" : "", "Expenses::Postage" : "", "Expenses::Stationary" : "", "Expenses::Water" : "", "Expire in next month" : "", "Expired" : "", "Expired in the last month" : "", "Expired in the last week" : "", "Expires" : "", "Expiry" : "", "Expiry date" : "", "Export" : "", "Export Animals as CSV" : "", "Export Report" : "", "Export Reports as CSV" : "", "Export a CSV file of animal records that ASM can import into another database." : "", "Export this database in various formats" : "", "Exporting the complete database can take some time and generate a very large file, are you sure?" : "", "Extra Images" : "", "Extra images" : "", "Extra-Toes Cat (Hemingway Polydactyl)" : "", "F (Feral Cat)" : "", "FECV/FeCoV" : "", "FIPV" : "", "FIV" : "", "FIV Result" : "", "FIV+" : "", "FIV/L Test Date" : "", "FIV/L Tested" : "", "FLV" : "", "FLV Result" : "", "FLV+" : "", "FTP hostname" : "", "FTP password" : "", "FTP username" : "", "FVRCP" : "", "Facebook" : "", "Failed sending email" : "", "Failed to create payment." : "", "Failed to renew license." : "", "Fawn" : "", "Fawn Tortoiseshell" : "", "FeLV" : "", "Features" : "", "Feb" : "ก.พ.", "February" : "กุมภาพันธ์", "Fee" : "", "Female" : "เพศเมีย", "Feral" : "", "Ferret" : "", "Field Spaniel" : "", "Field names should not contain spaces." 
: "", "Fila Brasileiro" : "", "File" : "", "Filter" : "", "Financial" : "", "Finch" : "", "Find Animal" : "ค้นหาสัตว์", "Find Animal/Person" : "", "Find Found Animal" : "ค้นหาสัตว์ที่พบเจอ", "Find Incident" : "", "Find Lost Animal" : "ค้นหาสัตว์ที่สูญหาย", "Find Person" : "", "Find a found animal" : "ค้นหาสัตว์ที่พบเจอ", "Find a lost animal" : "ค้นหาสัตว์ที่สูญหาย", "Find aco" : "", "Find an incident" : "", "Find animal" : "ค้นหาสัตว์", "Find animal columns" : "", "Find animal control incidents returned {0} results." : "", "Find animals matching the looking for criteria of this person" : "", "Find donor" : "", "Find driver" : "", "Find fosterer" : "", "Find found animal returned {0} results." : "", "Find homechecked" : "", "Find homechecker" : "", "Find incident" : "", "Find lost animal returned {0} results." : "", "Find member" : "", "Find person" : "", "Find person columns" : "", "Find retailer" : "", "Find shelter" : "", "Find staff" : "", "Find staff/volunteer" : "", "Find this address on a map" : "", "Find vet" : "", "Find volunteer" : "", "Fine Amount" : "", "Finnish Lapphund" : "", "Finnish Spitz" : "", "First Last" : "", "First Names" : "", "First name(s)" : "", "First offence" : "", "Fish" : "ปลา", "Flag" : "", "Flags" : "", "Flat-coated Retriever" : "", "Flemish Giant" : "", "Florida White" : "", "Followup" : "", "Followup Between" : "", "Followup Date/Time" : "", "Footer" : "", "For" : "", "Forbidden" : "", "Forenames" : "", "Forget" : "", "Form URL" : "", "Forms need a name." : "", "Foster" : "", "Foster Book" : "", "Foster Capacity" : "", "Foster Transfer" : "โอนย้ายฝากเลี้ยง", "Foster an animal" : "", "Foster book" : "", "Foster movements must have a valid foster date." : "", "Foster successfully created." 
: "", "Fostered" : "", "Fostered Animals" : "สัตว์ที่สูญหาย", "Fostered to {0} since {1}" : "", "Fosterer" : "", "Fosterer (Active Only)" : "", "Fosterer Medical Report" : "", "Found" : "", "Found Animal" : "สัตว์ที่พบเจอ", "Found Animal - Additional" : "", "Found Animal - Details" : "", "Found Animal Contact" : "", "Found Animal {0}" : "สัตว์ที่พบเจอ: {0}", "Found Animal: {0}" : "สัตว์ที่พบเจอ: {0}", "Found animal - {0} {1} [{2}]" : "", "Found animal entries matching '{0}'." : "", "Found animals must have a contact" : "", "Found animals reported in the last 30 days." : "", "Found from" : "", "Found to" : "", "FoundLost animal entry {0} successfully created." : "", "Fox Terrier" : "", "Foxhound" : "", "Fr" : "", "French Bulldog" : "", "French-Lop" : "", "Frequency" : "ความถี่", "Frequently Asked Questions" : "", "Fri" : "", "Friday" : "", "From" : "", "From Fostering" : "", "From Other" : "", "From retailer is only valid on adoption movements." : "", "Future notes" : "", "GDPR Contact Opt-In" : "", "Gaited" : "", "Gas Bills" : "", "Gecko" : "", "General" : "", "Generate" : "", "Generate Documents" : "", "Generate HTML from this SQL" : "", "Generate Report" : "", "Generate a document from this animal" : "", "Generate a document from this incident" : "", "Generate a document from this movement" : "", "Generate a document from this person" : "", "Generate a document from this record" : "", "Generate a javascript database for the search page" : "", "Generate a new animal code" : "", "Generate a random name for this animal" : "", "Generate document from this appointment" : "", "Generate document from this license" : "", "Generate document from this payment" : "", "Generate document from this transport" : "", "Generate documentation" : "", "Generate documents" : "", "Generate image thumbnails as tn_$$IMAGE$$" : "", "Generated document '{0}'" : "", "Gerbil" : "", "German Pinscher" : "", "German Shepherd Dog" : "", "German Shorthaired Pointer" : "", "German Wirehaired 
Pointer" : "", "Get more reports from sheltermanager.com" : "", "Gift Aid" : "", "GiftAid" : "", "Giftaid" : "", "Ginger" : "", "Ginger and White" : "ส้มและขาว", "Give" : "", "Give Treatments" : "", "Give Vaccination" : "", "Given" : "", "Glen of Imaal Terrier" : "", "Go" : "", "Go the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "", "Go the options screen and set your shelter's contact details and other settings." : "", "Go the system users screen and add user accounts for your staff." : "", "Goat" : "แพะ", "Golden" : "", "Golden Retriever" : "", "Goldfish" : "ปลาทอง", "Good With Cats" : "เป็นมิตรต่อแมว", "Good With Children" : "เป็นมิตรต่อเด็ก", "Good With Dogs" : "เป็นมิตรต่อสุนัข", "Good with Cats" : "เป็นมิตรต่อแมว", "Good with Children" : "เป็นมิตรต่อเด็ก", "Good with Dogs" : "เป็นมิตรต่อสุนัข", "Good with cats" : "เป็นมิตรต่อแมว", "Good with children" : "เป็นมิตรต่อเด็ก", "Good with dogs" : "เป็นมิตรต่อสุนัข", "Good with kids" : "เป็นมิตรต่อเด็ก", "Google+" : "", "Goose" : "", "Gordon Setter" : "", "Grade" : "", "Great Dane" : "", "Great Pyrenees" : "", "Greater Swiss Mountain Dog" : "", "Green" : "เขียว", "Grey" : "เทา", "Grey and White" : "เทาและขาว", "Greyhound" : "", "Guinea Pig" : "", "Guinea fowl" : "", "HMRC Gift Aid Spreadsheet" : "", "HTML" : "", "HTML Publishing Templates" : "", "HTML/FTP Publisher" : "", "Hairless" : "", "Half-Yearly" : "รายครึ่งปี", "Hamster" : "", "Harlequin" : "", "Havana" : "", "Havanese" : "", "Header" : "", "Health Problems" : "", "Health and Identification" : "", "Healthy" : "สมบูรณ์", "Heartworm" : "", "Heartworm Test Date" : "", "Heartworm Test Result" : "", "Heartworm Tested" : "", "Heartworm+" : "", "Hedgehog" : "", "Held" : "", "Help" : "", "Hepatitis" : "", "Here are some things you should do before you start adding animals and people to your database." 
: "", "Hidden" : "", "Hidden Comments" : "", "Hidden comments about the animal" : "", "Hide deceased animals from the home page" : "", "High" : "", "Highlight" : "", "Himalayan" : "", "History" : "ประวัติ", "Hold" : "", "Hold the animal until this date or blank to hold indefinitely" : "", "Hold until" : "", "Hold until {0}" : "", "Holland Lop" : "", "Home" : "", "Home Phone" : "", "Home page" : "", "Homecheck Areas" : "", "Homecheck Date" : "", "Homecheck History" : "", "Homecheck areas" : "", "Homechecked" : "", "Homechecked By" : "", "Homechecked by" : "", "Homechecker" : "", "Horizontal Pitch" : "", "Horse" : "ม้า", "Hotot" : "", "Hound" : "", "Hours" : "", "Housetrained" : "", "Hovawart" : "", "How urgent is it that we take this animal?" : "", "Husky" : "", "I've finished, Don't show me this popup again." : "", "IP Restriction" : "", "IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "", "Ibizan Hound" : "", "If the shelter provides initial insurance cover to new adopters, the policy number" : "", "If this form has a populated emailaddress field during submission, send a confirmation email to it" : "", "If this is the web preferred image, web publishers will use these notes as the animal description" : "", "If this person is a fosterer, the maximum number of animals they can care for." : "", "If this person is a member, the date that membership expires." : "", "If this person is a member, their membership number" : "", "If this person is a member, their membership number." : "", "If this stock record is for a drug, the batch number from the container" : "", "If this stock record is for a perishable good, the expiry date on the container" : "", "If you assign view or edit roles, only users within those roles will be able to view and edit this account." 
: "", "If you don't select any locations, publishers will include animals in all locations." : "", "Iguana" : "", "Illyrian Sheepdog" : "", "Image" : "", "Image file" : "", "Import" : "", "Import a CSV file" : "", "Import a PayPal CSV file" : "", "Import from file" : "", "Important" : "", "In" : "", "In SubTotal" : "", "In the last month" : "", "In the last quarter" : "", "In the last week" : "", "In the last year" : "", "In-Kind Donation" : "บริจาค", "Inactive" : "", "Inactive - do not include" : "", "Incident" : "", "Incident - Additional" : "", "Incident - Citation" : "", "Incident - Details" : "", "Incident - Dispatch" : "", "Incident - Owner" : "", "Incident Between" : "", "Incident Completed Types" : "", "Incident Date/Time" : "", "Incident Type" : "", "Incident Types" : "", "Incident date cannot be blank" : "", "Incident followup" : "", "Incident {0} successfully created." : "", "Incident {0}, {1}: {2}" : "", "Incidents" : "", "Incidents Requiring Followup" : "", "Include CSV header line" : "", "Include Removed" : "", "Include animals in the following locations" : "", "Include animals on trial adoption" : "", "Include animals who don't have a description" : "รวมสัตว์ที่ไม่มีภาพถ่ายด้วย", "Include animals who don't have a picture" : "", "Include cruelty case animals" : "", "Include deceased animals" : "", "Include fostered animals" : "", "Include found" : "", "Include held animals" : "", "Include incomplete medical records when generating document templates" : "", "Include incomplete vaccination and test records when generating document templates" : "", "Include non-shelter animals" : "", "Include off-shelter animals in medical calendar and books" : "", "Include preferred photo" : "", "Include quarantined animals" : "", "Include reserved animals" : "", "Include retailer animals" : "", "Include returned" : "", "Include this image when publishing" : "", "Include unaltered animals" : "รวมสัตว์ที่ไม่มีภาพถ่ายด้วย", "Income" : "", "Income from an on-site shop" : 
"", "Income::" : "", "Income::Adoption" : "", "Income::Donation" : "", "Income::EntryDonation" : "", "Income::Interest" : "", "Income::OpeningBalances" : "", "Income::Shop" : "", "Income::Sponsorship" : "", "Income::WaitingList" : "", "Incoming" : "", "Incoming Forms" : "", "Incoming donations (misc)" : "", "Incoming forms are online forms that have been completed and submitted by people on the web." : "", "Incomplete incidents" : "", "Incomplete notes upto today" : "", "Index" : "", "Individual/Couple" : "", "Induct a new animal" : "", "Information" : "ข้อมูล", "Initials" : "", "Install" : "", "Install the selected reports to your database" : "", "Insurance" : "", "Insurance No" : "", "Intake" : "", "Intakes {0}" : "", "Internal Location" : "", "Internal Locations" : "", "Invalid email address" : "", "Invalid email address '{0}'" : "", "Invalid microchip number length" : "", "Invalid time '{0}', times should be in 00:00 format" : "", "Invalid time, times should be in HH:MM format" : "", "Invalid username or password." : "", "Investigation" : "", "Investigations" : "", "Investigator" : "", "Invoice Only" : "", "Invoice items need a description and amount." : "", "Irish Setter" : "", "Irish Terrier" : "", "Irish Water Spaniel" : "", "Irish Wolfhound" : "", "Is this a permanent foster?" : "", "Is this a trial adoption?" : "", "Issue a new insurance number for this animal/adoption" : "", "Issue date and expiry date must be valid dates." 
: "", "Issued" : "", "Issued in the last month" : "", "Issued in the last week" : "", "Italian Greyhound" : "", "Italian Spinone" : "", "Item" : "", "Jack Russell Terrier" : "", "Jan" : "ม.ค.", "January" : "มกราคม", "Japanese Bobtail" : "", "Japanese Chin" : "", "Javanese" : "", "Jersey Wooly" : "", "Jindo" : "", "Jul" : "ก.ค.", "July" : "กรกฎาคม", "Jump to diary" : "", "Jump to donations" : "", "Jump to media" : "", "Jump to movements" : "", "Jun" : "มิ.ย.", "June" : "มิถุนายน", "Jurisdiction" : "ช่วงระยะ", "Jurisdictions" : "", "Kai Dog" : "", "Kakariki" : "", "Karelian Bear Dog" : "", "Keep table headers visible when scrolling" : "", "Keeshond" : "", "Kennel" : "", "Kerry Blue Terrier" : "", "Kishu" : "", "Kittens (under {0} months)" : "", "Km" : "", "Komondor" : "", "Korat" : "", "Kuvasz" : "", "Kyi Leo" : "", "Label" : "", "Labrador Retriever" : "", "Lakeland Terrier" : "", "Lancashire Heeler" : "", "Large" : "ใหญ่", "Last First" : "", "Last Location" : "", "Last Month" : "", "Last Name" : "", "Last Week" : "", "Last changed by {0} on {1}" : "", "Last name" : "", "Last, First" : "", "Latency" : "", "Latency Tester" : "", "Least recently changed" : "", "Leave" : "", "Leave of absence" : "", "Left Margin" : "", "Left shelter" : "", "Leonberger" : "", "Leptospirosis" : "", "Letter" : "", "Lhasa Apso" : "", "Liability" : "", "Licence for {0} successfully renewed {1} - {2}" : "", "License" : "", "License Number" : "", "License Types" : "", "License number '{0}' has already been issued." : "", "License numbers matching '{0}'." 
: "", "License requires a number" : "", "License requires a person" : "", "License requires issued and expiry dates" : "", "Licenses" : "", "Licensing" : "", "Lifetime" : "", "Light Amber" : "", "Lilac" : "", "Lilac Tortie" : "", "Limited to {0} matches" : "", "Link" : "ลิงค์", "Link an animal" : "", "Link to an external web resource" : "", "Link to this animal" : "", "Links" : "ลิงค์", "List" : "", "Litter" : "", "Litter Ref" : "", "Litter Reference" : "", "Littermates" : "", "Litters" : "", "Litters need at least a required date and number." : "", "Live Releases {0}" : "วันที่ปล่อย", "Liver" : "", "Liver and White" : "เทาและขาว", "Lizard" : "", "Llama" : "ลามะ", "Loading..." : "", "Loan" : "", "Local" : "ที่ตั้ง", "Locale" : "", "Location" : "ที่ตั้ง", "Location Filter" : "", "Location and Species" : "", "Location and Type" : "", "Location and Unit" : "", "Locations" : "ที่ตั้ง", "Log" : "ปูมบันทึก", "Log Text" : "", "Log Type" : "ประเภทของปูมบันทึก", "Log Types" : "ประเภทของปูมบันทึก", "Log date must be a valid date" : "", "Log entries need a date and text." : "", "Log requires a date." : "", "Log requires a person." : "", "Log requires an animal." : "", "Log successfully added." : "", "Login" : "เข้าสู่ระบบ", "Logout" : "ออกจากระบบ", "Long" : "ยาว", "Long term" : "", "Longest On Shelter" : "", "Looking For" : "", "Looking for" : "", "Lookup" : "", "Lookup (Multiple Select)" : "", "Lookup Values" : "", "Lookup data" : "", "Lookups" : "", "Lop Eared" : "", "Lory/Lorikeet" : "", "Lost" : "แมว", "Lost Animal" : "สัตว์ที่สูญหาย", "Lost Animal - Additional" : "", "Lost Animal - Details" : "", "Lost Animal Contact" : "", "Lost Animal: {0}" : "สัตว์ที่สูญหาย: {0}", "Lost and Found" : "", "Lost and found entries must have a contact" : "", "Lost animal - {0} {1} [{2}]" : "", "Lost animal entries matching '{0}'." : "", "Lost animal entry {0} successfully created." : "", "Lost animals must have a contact" : "", "Lost animals reported in the last 30 days." 
: "", "Lost from" : "", "Lost to" : "", "Lost/Found" : "", "Lots of reports installed? Clean up the Reports menu with Settings-Options- Display-Show report menu items in collapsed categories." : "", "Lovebird" : "", "Low" : "", "Lowchen" : "", "Lowest" : "", "M (Miscellaneous)" : "", "MM = current month" : "", "Macaw" : "", "Mail" : "", "Mail Merge" : "", "Mail Merge - {0}" : "", "Maine Coon" : "", "Make this the default image when creating documents" : "", "Make this the default image when viewing this record and publishing to the web" : "", "Make this the default video link when publishing to the web" : "", "Male" : "เพศผู้", "Maltese" : "", "Manchester Terrier" : "", "Mandatory" : "", "Manual" : "", "Manually enter codes (do not generate)" : "", "Manufacturer" : "", "Manx" : "", "Map" : "", "Map of active incidents" : "", "Mar" : "มี.ค.", "March" : "มีนาคม", "Maremma Sheepdog" : "", "Mark Deceased" : "", "Mark an animal deceased" : "", "Mark dispatched now" : "", "Mark new animals as not for adoption" : "", "Mark responded now" : "", "Mark selected payments received" : "", "Mark this owner homechecked" : "", "Mark treatments given" : "", "Marketer" : "", "Markings" : "คำเตือน", "Markup" : "", "Marriage/Relationship split" : "", "Mastiff" : "", "Match" : "มีนาคม", "Match Lost and Found" : "", "Match against other lost/found animals" : "", "Match lost and found animals" : "", "Match this animal with the lost and found database" : "", "Maternity" : "", "May" : "พ.ค.", "McNab" : "", "Media" : "มีเดีย", "Media Notes" : "", "Media notes contain" : "", "Medical" : "", "Medical Book" : "", "Medical Profiles" : "", "Medical book" : "", "Medical calendar" : "", "Medical profiles" : "", "Medical profiles need a profile name, treatment, dosage and frequencies." : "", "Medical regimens need an animal, name, dosage, a start date and frequencies." 
: "", "Medicate" : "", "Medicate Animal" : "", "Medium" : "กลาง", "Member" : "สมาชิก", "Membership Expiry" : "", "Membership Number" : "", "Merge" : "", "Merge Person" : "", "Merge another animal into this one" : "", "Merge another person into this one" : "", "Merge bonded animals into a single record" : "", "Merge duplicate records" : "", "Message" : "", "Message Board" : "", "Message from {0}" : "", "Message successfully sent to {0}" : "", "Messages" : "", "Messages successfully sent" : "", "Method" : "", "Microchip" : "", "Microchip Date" : "", "Microchip Number" : "", "Microchip number {0} has already been allocated to another animal." : "", "Microchipped" : "", "Miles" : "", "Mini Rex" : "", "Mini-Lop" : "", "Miniature Pinscher" : "", "Minutes" : "", "Missouri Foxtrotter" : "", "Mixed Breed" : "พันธุ์ผสม", "Mo" : "", "Mobile signing pad" : "", "Modify Additional Fields" : "", "Modify Document Templates" : "", "Modify Lookups" : "", "Mon" : "", "Monday" : "", "Money" : "เงิน", "Month" : "", "Monthly" : "รายเดือน", "More Info Needed" : "", "More Medications" : "", "More Tests" : "", "More Vaccinations" : "", "More diary notes" : "", "Morgan" : "", "Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "", "Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "", "Most recently changed" : "", "Most relevant" : "", "Mother" : "", "Mountain Cur" : "", "Mountain Dog" : "", "Mouse" : "", "Move" : "ย้าย", "Move an animal to a retailer" : "", "Moved to animal record {0}" : "", "Movement" : "", "Movement Date" : "", "Movement Number" : "", "Movement Type" : "", "Movement Types" : "", "Movement dates clash with an existing movement." : "", "Movement numbers must be unique." : "", "Movements" : "", "Movements require an animal" : "", "Movements require an animal." : "", "Moving..." 
: "", "Multi-Lookup" : "", "Multiple Treatments" : "", "Munchkin" : "", "Munsterlander" : "", "Mustang" : "", "My Fosters" : "", "My Incidents" : "", "My Undispatched Incidents" : "", "My diary notes" : "", "My sheltermanager.com account" : "", "Mynah" : "", "N (Non-Shelter Animal)" : "", "NNN or NN = number unique for this type of animal for this year" : "", "Name" : "ชื่อ", "Name Contains" : "", "Name and Address" : "", "Name cannot be blank" : "", "Name contains" : "", "Neapolitan Mastiff" : "", "Negative" : "", "Neglect" : "", "Netherland Dwarf" : "", "Neuter/Spay" : "", "Neutered" : "", "Neutered/Spayed Non-Shelter Animals In {0}" : "", "Neutered/Spayed Shelter Animals In {0}" : "", "New" : "ใหม่", "New Account" : "", "New Appointment" : "", "New Citation" : "", "New Cost" : "", "New Diary" : "", "New Diet" : "", "New Document" : "", "New Field" : "", "New Fosterer" : "", "New Guinea Singing Dog" : "", "New Item" : "", "New License" : "", "New Litter" : "", "New Log" : "ปูมบันทึกใหม่", "New Movement" : "", "New Owner" : "", "New Password" : "", "New Payment" : "", "New Profile" : "", "New Record" : "", "New Regimen" : "", "New Report" : "", "New Role" : "", "New Stock" : "", "New Task" : "", "New Template" : "", "New Test" : "", "New Transport" : "", "New Trap Loan" : "", "New User" : "", "New Vaccination" : "", "New Voucher" : "", "New Waiting List Entry" : "", "New Zealand" : "", "New diary task" : "", "New form field" : "", "New name" : "", "New online form" : "", "New password and confirmation password don't match." : "", "New task detail" : "", "New template" : "", "Newfoundland Dog" : "", "Next" : "ข้อความ", "No" : "ไม่", "No adjustment" : "", "No data to show on the report." : "", "No data." : "", "No description" : "คำอธิบาย", "No longer retained" : "", "No matches found." : "", "No picture" : "", "No publishers are running." : "", "No results found." : "", "No results." : "", "No tasks are running." 
: "", "No view permission for this report" : "", "Noise" : "", "Non-Shelter" : "", "Non-Shelter Animal" : "", "Non-Shelter Animals" : "สัตว์นอกศูนย์พักพิง", "Non-shelter Animals" : "สัตว์นอกศูนย์พักพิง", "None" : "ไม่มี", "Norfolk Terrier" : "", "Normal user" : "", "Norwegian Buhund" : "", "Norwegian Elkhound" : "", "Norwegian Forest Cat" : "", "Norwegian Lundehund" : "", "Norwich Terrier" : "", "Not Arrived" : "", "Not Available For Adoption" : "", "Not Available for Adoption" : "", "Not For Adoption" : "", "Not Microchipped" : "", "Not Reconciled" : "", "Not available for adoption" : "", "Not dispatched" : "", "Not for adoption" : "", "Not for adoption flag set" : "", "Not in chosen publisher location" : "", "Not reconciled" : "", "Note" : "บันทึก", "Notes" : "บันทึก", "Notes about the death of the animal" : "", "Nov" : "พ.ย.", "Nova Scotia Duck-Tolling Retriever" : "", "November" : "พฤศจิกายน", "Now" : "", "Number" : "", "Number in litter" : "", "Number of Tasks" : "", "Number of animal links to show" : "", "Number of fields" : "", "Number of pets" : "", "Ocicat" : "", "Oct" : "ต.ค.", "October" : "ตุลาคม", "Office" : "", "Old English Sheepdog" : "", "Old Password" : "", "Omit criteria" : "", "Omit header/footer" : "", "On Foster (in figures)" : "", "On Shelter" : "", "On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "", "On shelter for {0} days. Total cost: {1}" : "", "Once assigned, codes cannot be changed" : "", "Once signed, this document cannot be edited or tampered with." : "", "One Off" : "", "One-Off" : "", "Online Form: {0}" : "", "Online Forms" : "", "Online form fields need a name and label." : "", "Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "", "Only PDF, HTML and JPG image files can be attached." 
: "", "Only active accounts" : "", "Only allow users with one of these roles to view this incident" : "", "Only show account totals for the current period, which starts on " : "", "Only show declawed" : "", "Only show pickups" : "", "Only show special needs" : "", "Only show transfers" : "", "Open Incidents" : "", "Open records in a new browser tab" : "", "Open reports in a new browser tab" : "", "Opening balances" : "", "Optional, the date the vaccination \"wears off\" and needs to be administered again" : "", "Options" : "", "Or move this diary on to" : "", "Order published animals by" : "", "Organisation" : "", "Organization" : "", "Organization name" : "", "Oriental Long Hair" : "", "Oriental Short Hair" : "", "Oriental Tabby" : "", "Original Owner" : "", "Ostrich" : "", "Other Account" : "", "Other Organisation" : "", "Other Shelter" : "", "Otterhound" : "", "Our shelter does trial adoptions, allow us to mark these on movement screens" : "", "Out" : "", "Out Between" : "", "Out SubTotal" : "", "Output a deceased animals page" : "", "Output a page with links to available online forms" : "", "Output a separate page for each animal type" : "", "Output a separate page for each species" : "", "Output an adopted animals page" : "", "Output an rss.xml page" : "", "Overdue" : "", "Overdue medical items" : "", "Overtime" : "", "Owl" : "", "Owner" : "เจ้าของ", "Owner Vet" : "", "Owner given citation" : "", "Owners Vet" : "", "PM" : "", "Page extension" : "", "Paid" : "", "Paint/Pinto" : "", "Palomino" : "", "Paper Size" : "", "Papillon" : "", "Parainfluenza" : "", "Parakeet (Other)" : "", "Parent" : "", "Parrot (Other)" : "", "Parrotlet" : "", "Parvovirus" : "", "Paso Fino" : "", "Pass Homecheck" : "", "Password" : "", "Password for '{0}' has been reset." : "", "Password is incorrect." : "", "Password successfully changed." : "", "Passwords cannot be blank." 
: "", "Path" : "", "Patterdale Terrier (Fell Terrier)" : "", "PayPal" : "", "Payment" : "", "Payment Book" : "", "Payment From" : "", "Payment Methods" : "", "Payment Type" : "", "Payment Types" : "", "Payment book" : "", "Payment calendar" : "", "Payment of {0} successfully received ({1})." : "", "Payments" : "", "Payments need at least one date, an amount and a person." : "", "Payments of type" : "", "Payments require a person" : "", "Payments require a received date" : "", "Peacock/Pea fowl" : "", "Pekingese" : "", "Pending Adoption" : "", "Pending Apartment Verification" : "", "Pending Home Visit" : "", "Pending Vet Check" : "", "Pension" : "", "People" : "", "People Looking For" : "", "People matching '{0}'." : "", "People or animal records that already exist in the database will not be imported again and movement/payment data will be attached to the existing records instead." : "", "People with active reservations, but no homecheck has been done." : "", "People with overdue donations." : "", "Percheron" : "", "Perform" : "", "Perform Homecheck" : "", "Perform Test" : "", "Performed" : "", "Permanent Foster" : "", "Persian" : "", "Person" : "", "Person - Additional" : "", "Person - Name and Address" : "", "Person - Type" : "", "Person Flags" : "", "Person looking for report" : "", "Person successfully created" : "", "Personal" : "", "Peruvian Inca Orchid" : "", "Peruvian Paso" : "", "Petit Basset Griffon Vendeen" : "", "Pharaoh Hound" : "", "Pheasant" : "", "Phone" : "โทร", "Phone contains" : "", "Photo successfully uploaded." : "", "Picked Up" : "", "Picked Up By" : "", "Pickup" : "", "Pickup Address" : "", "Pickup Location" : "", "Pickup Locations" : "", "Pig" : "หมู", "Pig (Farm)" : "หมู (ฟาร์ม)", "Pigeon" : "นกพิราบ", "Pinterest" : "", "Pionus" : "", "Pit Bull Terrier" : "", "Pixie-Bob" : "", "Please click the Sign button when you are finished." : "", "Please see the manual for more information." 
: "", "Please select a PDF, HTML or JPG image file to attach" : "", "Please tighten the scope of your email campaign to {0} emails or less." : "", "Please use the links below to electronically sign these documents." : "", "Plott Hound" : "", "Poicephalus/Senegal" : "", "Pointer" : "", "Points for being found within 2 weeks of being lost" : "", "Points for matching age group" : "", "Points for matching breed" : "", "Points for matching color" : "", "Points for matching features" : "", "Points for matching lost/found area" : "", "Points for matching sex" : "", "Points for matching species" : "", "Points for matching zipcode" : "", "Points required to appear on match report" : "", "Polish" : "", "Polish Lowland Sheepdog" : "", "Pomeranian" : "", "Pony" : "", "Poodle" : "", "Portugese Podengo" : "", "Portuguese Water Dog" : "", "Positive" : "", "Positive for Heartworm, FIV or FLV" : "", "Positive/Negative" : "", "Post" : "รหัสไปรษณีย์", "Postage costs" : "", "Pot Bellied" : "", "Prairie Dog" : "", "Prefill new media notes for animal images with animal comments if left blank" : "", "Prefill new media notes with the filename if left blank" : "", "Premises" : "", "Presa Canario" : "", "Press F11 in HTML or SQL code editing boxes to edit in fullscreen mode" : "", "Preview" : "", "Previous" : "", "Previous Adopter" : "", "Print" : "พิมพ์", "Print Preview" : "", "Print selected forms" : "", "Printable Manual" : "", "Printing word processor documents uses hidden iframe and window.print" : "", "Priority" : "", "Priority Floor" : "", "Produce a CSV File" : "", "Produce a PDF of printable labels" : "", "Profile" : "", "Profile name cannot be blank" : "", "Public Holiday" : "", "Publish Animals to the Internet" : "", "Publish HTML via FTP" : "", "Publish now" : "", "Publish to folder" : "", "Published to Website" : "", "Publisher" : "", "Publisher Breed" : "", "Publisher Color" : "", "Publisher Logs" : "", "Publisher Species" : "", "Publishing" : "", "Publishing History" : "", 
"Publishing Logs" : "", "Publishing Options" : "", "Publishing complete." : "", "Publishing template" : "", "Pug" : "", "Puli" : "", "Pumi" : "", "Puppies (under {0} months)" : "", "Purchased" : "", "Qty" : "", "Quaker Parakeet" : "", "Quantity" : "", "Quarantine" : "", "Quarterhorse" : "", "Quarterly" : "รายไตรมาสต์", "Quick Links" : "", "Quicklinks" : "", "Quicklinks are shown on the home page and allow quick access to areas of the system." : "", "R" : "", "Rabbit" : "กระต่าย", "Rabies" : "", "Rabies Tag" : "", "RabiesTag" : "", "Radio Buttons" : "", "Ragamuffin" : "", "Ragdoll" : "", "Rank" : "อันดับ", "Rat" : "", "Rat Terrier" : "", "Raw Markup" : "", "Read the manual for more information about Animal Shelter Manager." : "", "Real name" : "", "Reason" : "เหตุผล", "Reason For Appointment" : "", "Reason Not From Owner" : "", "Reason for Entry" : "", "Reason for entry" : "", "Reason not from Owner" : "", "Reason the owner did not bring in the animal themselves" : "", "Recalculate ALL animal ages/times" : "", "Recalculate ALL animal locations" : "", "Recalculate on-shelter animal locations" : "", "Receipt No" : "", "Receipt/Invoice" : "", "Receive" : "", "Receive a donation" : "", "Receive a payment" : "", "Received" : "", "Received in last day" : "", "Received in last month" : "", "Received in last week" : "", "Received in last year" : "", "Received today" : "", "Recently Adopted" : "", "Recently Changed" : "", "Recently Entered Shelter" : "", "Recently Fostered" : "", "Recently deceased" : "", "Recently deceased shelter animals (last 30 days)." : "", "Reception" : "", "Reclaim" : "", "Reclaim an animal" : "", "Reclaim movements must have a valid reclaim date." : "", "Reclaim successfully created." 
: "", "Reclaimed" : "", "Reconcile" : "", "Reconciled" : "", "Redbone Coonhound" : "", "Rediarised" : "", "Redirect to URL after POST" : "", "Reference" : "", "Refresh" : "", "Regenerate 'Match lost and found animals' report" : "", "Regenerate 'Person looking for' report" : "", "Regenerate annual animal figures for" : "", "Regenerate monthly animal figures for" : "", "Regenerate person names in selected format" : "", "Register Microchip" : "", "Register microchips after" : "", "Released To Wild" : "", "Released To Wild {0}" : "", "Reload" : "", "Remaining" : "", "Remember me on this computer" : "", "Removal" : "เอาออก", "Removal Reason" : "", "Removal reason" : "", "Remove" : "", "Remove HTML and PDF document media after this many years" : "", "Remove clinic functionality from screens and menus" : "", "Remove fine-grained animal control incident permissions" : "", "Remove holds after" : "", "Remove move menu and the movements tab from animal and person screens" : "", "Remove personally identifiable data" : "", "Remove previously published files before uploading" : "", "Remove retailer functionality from the movement screens and menus" : "", "Remove short shelter code box from the animal details screen" : "", "Remove the FIV/L test fields from animal health details" : "", "Remove the Litter ID field from animal details" : "", "Remove the Rabies Tag field from animal health details" : "", "Remove the adoption coordinator field from animal entry details" : "", "Remove the adoption fee field from animal details" : "", "Remove the animal control functionality from menus and screens" : "", "Remove the bonded with fields from animal entry details" : "", "Remove the city/state fields from person details" : "", "Remove the coat type field from animal details" : "", "Remove the declawed box from animal health details" : "", "Remove the document repository functionality from menus" : "", "Remove the good with fields from animal notes" : "", "Remove the heartworm test fields 
from animal health details" : "", "Remove the insurance number field from the movement screens" : "", "Remove the location unit field from animal details" : "", "Remove the microchip fields from animal identification details" : "", "Remove the neutered fields from animal health details" : "", "Remove the online form functionality from menus" : "", "Remove the picked up fields from animal entry details" : "", "Remove the rota functionality from menus and screens" : "", "Remove the size field from animal details" : "", "Remove the stock control functionality from menus and screens" : "", "Remove the tattoo fields from animal identification details" : "", "Remove the transport functionality from menus and screens" : "", "Remove the trap loan functionality from menus and screens" : "", "Remove the weight field from animal details" : "", "Removed" : "", "Rename" : "", "Renew License" : "", "Renew licence" : "", "Renew license" : "", "Report" : "รายงาน", "Report Title" : "", "Report a new incident" : "", "Reports" : "รายงาน", "Request signature by email" : "", "Requested" : "", "Require followup" : "", "Required" : "", "Required date must be a valid date" : "", "Reschedule" : "", "Reservation" : "", "Reservation Book" : "", "Reservation Cancelled" : "", "Reservation Date" : "", "Reservation For" : "", "Reservation Status" : "", "Reservation Statuses" : "", "Reservation book" : "", "Reservation date cannot be after cancellation date." : "", "Reservation successfully created." : "", "Reservations must have a valid reservation date." : "", "Reserve" : "", "Reserve an animal" : "", "Reserved" : "", "Reset" : "", "Reset Password" : "รีเซ็ตรหัสผ่าน", "Respond" : "", "Responded" : "", "Responded Between" : "", "Responded Date/Time" : "", "Result" : "", "Results" : "", "Results for '{0}'." : "", "Retailer" : "", "Retailer Animals" : "ลบสัตว์", "Retailer Book" : "", "Retailer book" : "", "Retailer movement successfully created." 
: "", "Retailer movements must have a valid movement date." : "", "Retriever" : "", "Return" : "", "Return Category" : "", "Return Date" : "", "Return a transferred animal" : "", "Return an animal from adoption" : "", "Return an animal from another movement" : "", "Return an animal from transfer" : "", "Return date cannot be before the movement date." : "", "Return this movement and bring the animal back to the shelter" : "", "Returned" : "", "Returned By" : "สร้างโดย", "Returned To Owner" : "", "Returned from" : "", "Returned to" : "", "Returned to Owner {0}" : "", "Returning" : "", "Returns {0}" : "", "Reupload animal images every time" : "", "Rex" : "", "Rhea" : "", "Rhinelander" : "", "Rhodesian Ridgeback" : "", "Ringneck/Psittacula" : "", "Role is in use and cannot be deleted." : "", "Roles" : "", "Roles need a name." : "", "Rosella" : "", "Rostered day off" : "", "Rota" : "", "Rota Types" : "", "Rota cloned successfully." : "", "Rotate image 90 degrees anticlockwis" : "", "Rotate image 90 degrees clockwise" : "", "Rottweiler" : "", "Rough" : "", "Rows" : "", "Ruddy" : "", "Russian Blue" : "", "S (Stray Cat)" : "", "S = first letter of animal species" : "", "SM Account" : "", "SMS" : "", "SQL" : "SQL", "SQL Interface" : "", "SQL dump" : "", "SQL dump (ASM2 HSQLDB Format)" : "", "SQL editor: Press F11 to go full screen and press CTRL+SPACE to autocomplete table and column names" : "", "SQL interface" : "", "SQL is syntactically correct." : "", "SS = first and second letter of animal species" : "", "Sa" : "", "Saddlebred" : "", "Saint Bernard St. Bernard" : "", "Sales Tax" : "", "Saluki" : "", "Samoyed" : "", "Sat" : "", "Satin" : "", "Saturday" : "", "Save" : "", "Save and leave" : "", "Save this incident" : "", "Save this person" : "", "Save this record" : "", "Save this waiting list entry" : "", "Saving..." 
: "", "Scale published animal images to" : "", "Scheduled" : "", "Schipperke" : "", "Schnauzer" : "", "Scottish Deerhound" : "", "Scottish Fold" : "", "Scottish Terrier Scottie" : "", "Script" : "", "Seal" : "", "Sealyham Terrier" : "", "Search" : "ค้นหา", "Search Results for '{0}'" : "", "Search returned {0} results." : "", "Search sort order" : "", "Searchable" : "", "Second offence" : "", "Select" : "เลือก", "Select a person" : "", "Select a person to attach this form to." : "", "Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "", "Select all" : "", "Select an animal" : "เลือกสัตว์", "Select an animal to attach this form to." : "", "Select an animal to merge into this record. The selected animal will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "", "Select animal to merge" : "เลือกสัตว์", "Select animals" : "เลือกสัตว์", "Select date for diary task" : "", "Select person to merge" : "", "Select recommended" : "", "Selected On-Shelter Animals" : "สัตว์ในศูนย์พักพิงที่เลือก", "Selkirk Rex" : "", "Send" : "ส่ง", "Send Emails" : "", "Send a weekly email to fosterers with medical information about their animals" : "ส่งอีเมลรายสัปดาห์ถึงผู้เลี้ยงดูชั่วคราวพร้อมข้อมูลทางการแพทย์เกี่ยวกับสัตว์ในความดูแล", "Send confirmation email to form submitter" : "", "Send emails" : "", "Send mass emails and perform mail merges" : "", "Send via email" : "", "Sending {0} emails is considered abusive and will damage the reputation of the email server." : "", "Sending..." : "กำลังส่ง...", "Senior" : "", "Sent to mobile signing pad." : "", "Sep" : "ก.ย.", "Separate waiting list rank by species" : "", "September" : "กันยายน", "Server clock adjustment" : "", "Set publishing options" : "", "Set this to 0 to never automatically remove." : "", "Set to 0 to never update urgencies." : "", "Set wether or not this user account can log in to the user interface." 
: "", "Setter" : "", "Setting a location filter will prevent this user seeing animals who are not in these locations on shelterview, find animal and search." : "", "Settings" : "การตั้งค่า", "Settings, Lookup data" : "", "Settings, Options" : "", "Settings, Reports" : "", "Settings, System user accounts" : "", "Sex" : "เพศ", "Sex and Species" : "เลือกสปีชี่ส์", "Sexes" : "", "Shar Pei" : "", "Share" : "", "Shared weblink" : "", "Shares" : "", "Sheep" : "แกะ", "Sheep Dog" : "", "Shelter" : "", "Shelter Animal" : "", "Shelter Animals" : "", "Shelter Details" : "", "Shelter animal {0} '{1}'" : "", "Shelter animals" : "", "Shelter code cannot be blank" : "", "Shelter code {0} has already been allocated to another animal." : "", "Shelter stats (all time)" : "", "Shelter stats (this month)" : "", "Shelter stats (this week)" : "", "Shelter stats (this year)" : "", "Shelter stats (today)" : "", "Shelter view" : "", "Shepherd" : "", "Shetland Sheepdog Sheltie" : "", "Shiba Inu" : "", "Shift" : "", "Shih Tzu" : "", "Short" : "สั้น", "Show GDPR Contact Opt-In field on person screens" : "", "Show PDF files inline instead of sending them as attachments" : "", "Show a cost field on medical/test/vaccination screens" : "", "Show a minimap of the address on person screens" : "", "Show a separate paid date field with costs" : "", "Show alerts on the home page" : "", "Show animal thumbnails in movement and medical books" : "", "Show animals adopted" : "", "Show codes on the shelter view screen" : "", "Show complete comments in table views" : "", "Show empty locations" : "", "Show on new record screens" : "", "Show quick links on all pages" : "", "Show quick links on the home page" : "", "Show report menu items in collapsed categories" : "", "Show short shelter codes on screens" : "", "Show the adoption fee field" : "", "Show the altered fields" : "", "Show the breed fields" : "", "Show the brought in by field" : "", "Show the color field" : "", "Show the date brought in field" : "", 
"Show the entry category field" : "", "Show the full diary (instead of just my notes) on the home page" : "", "Show the hold fields" : "", "Show the internal location field" : "", "Show the litter ID field" : "", "Show the location unit field" : "", "Show the microchip fields" : "", "Show the original owner field" : "", "Show the size field" : "", "Show the tattoo fields" : "", "Show the time brought in field" : "", "Show the transfer in field" : "", "Show the weight field" : "", "Show timeline on the home page" : "", "Show tips on the home page" : "", "Show transactions from" : "", "Show weight as lb rather than kg" : "", "Showing {0} timeline events." : "", "Siamese" : "", "Siberian" : "", "Siberian Husky" : "", "Sick leave" : "", "Sick/Injured" : "ป่วย/บาดเจ็บ", "Sick/injured animal" : "", "Sign" : "", "Sign document" : "", "Sign on screen" : "", "Signature" : "", "Signed" : "", "Signing" : "", "Signing Pad" : "", "Signup" : "", "Silky Terrier" : "", "Silver" : "", "Silver Fox" : "", "Silver Marten" : "", "Similar Animal" : "", "Similar Person" : "", "Simple" : "อย่างง่าย", "Singapura" : "", "Single Treatment" : "", "Site" : "ขนาด", "Sites" : "ขนาด", "Size" : "ขนาด", "Sizes" : "ขนาด", "Skunk" : "", "Skye Terrier" : "", "Sloughi" : "", "Small" : "เล็ก", "SmartTag PETID" : "", "Smooth Fox Terrier" : "", "Snake" : "งู", "Snowshoe" : "", "Social" : "", "Softbill (Other)" : "", "Sold" : "", "Somali" : "", "Some batch processes may take a few minutes to run and could prevent other users being able to use the system for a short time." : "", "Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." 
: "", "Some info text" : "", "Sorrel" : "", "Sorrel Tortoiseshell" : "", "Sorry, this document has already been signed" : "", "South Russian Ovcharka" : "", "Spaniel" : "", "Special Needs" : "", "Species" : "สปีชี่ส์", "Species A-Z" : "", "Species Z-A" : "", "Species to use when publishing to third party services and adoption sites" : "", "Specifying a reschedule date will make copies of the selected vaccinations and mark them to be given on the reschedule date. Example: If this vaccination needs to be given every year, set the reschedule date to be 1 year from today." : "", "Sphynx (hairless cat)" : "", "Spitz" : "", "Split baby/adult age at" : "", "Split species pages with a baby/adult prefix" : "", "Sponsorship donations" : "", "Staff" : "", "Staff Rota" : "", "Staff record" : "", "Staff rota" : "", "Staffordshire Bull Terrier" : "", "Standard" : "", "Standardbred" : "", "Start Date" : "", "Start Of Day" : "", "Start Time" : "", "Start at" : "", "Start date" : "", "Start date must be a valid date" : "", "Start of year" : "", "Started" : "", "Starts" : "เริ่ม", "State" : "สถานะ", "State contains" : "", "Stationary costs" : "", "Stats" : "สถิติ", "Stats period" : "", "Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "", "Status" : "สถานะ", "Status and Species" : "สถานะและสปีชี่ส์", "Stay" : "", "Stock" : "", "Stock Control" : "", "Stock Levels" : "", "Stock Locations" : "", "Stock Take" : "", "Stock Usage Type" : "", "Stock level must have a name" : "", "Stock level must have a unit" : "", "Stock needs a name and unit." 
: "", "Stocktake" : "", "Stolen" : "", "Stolen {0}" : "", "Stop" : "", "Stop Publishing" : "", "Stores" : "", "Stray" : "", "Su" : "", "SubTotal" : "", "Subject" : "", "Submission received: {0}" : "", "Success" : "", "Successfully attached to {0}" : "", "Sugar Glider" : "", "Sun" : "", "Sunday" : "", "Super user" : "", "Superuser" : "", "Surname" : "นามสกุล", "Surrender" : "", "Surrender Pickup" : "", "Suspect" : "", "Suspect 1" : "", "Suspect 2" : "", "Suspect 3" : "", "Suspect/Animal" : "", "Swan" : "", "Swedish Vallhund" : "", "Syntax check this SQL" : "", "System" : "", "System Admin" : "", "System Options" : "", "System user accounts" : "", "T = first letter of animal type" : "", "TNR" : "", "TNR - Trap/Neuter/Release" : "", "TT = first and second letter of animal type" : "", "Tabby" : "", "Tabby and White" : "", "Take another payment" : "", "Taken By" : "", "Tan" : "", "Tan and Black" : "", "Tan and White" : "", "Task complete." : "เสร็จแล้ว", "Task items are executed in order of index, lowest to highest" : "", "Tattoo" : "", "Tattoo Date" : "", "Tattoo Number" : "", "Tax" : "", "Tax Amount" : "", "Tax Rate %" : "", "Telephone" : "โทรศัพท์", "Telephone Bills" : "", "Template" : "", "Template Name" : "", "Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "", "Tennessee Walker" : "", "Terrapin" : "", "Terrier" : "", "Test" : "ข้อความ", "Test Animal" : "สัตว์ที่สูญหาย", "Test Book" : "", "Test Performed" : "", "Test Results" : "", "Test Types" : "", "Test book" : "", "Test marked as performed for {0} - {1}" : "", "Tests" : "ข้อความ", "Tests need an animal and at least a required date." : "", "Text" : "ข้อความ", "Text Encoding" : "", "Th" : "", "Thai Ridgeback" : "", "Thank you for choosing Animal Shelter Manager for your shelter!" : "", "Thank you, the document is now signed." : "", "That animal is already linked to the incident" : "", "The CSV file should be created by PayPal's \"All Activity\" report." 
: "", "The SmartTag PETID number" : "", "The SmartTag type" : "", "The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "", "The animal name" : "", "The animal record to merge must be different from the original." : "", "The animal sex" : "", "The base color of this animal" : "", "The coat type of this animal" : "", "The confirmation email message to send to the form submitter. Leave blank to send a copy of the completed form." : "", "The database will be inaccessible to all users while the export is in progress." : "", "The date reported to the shelter" : "", "The date the animal died" : "", "The date the animal was FIV/L tested" : "", "The date the animal was adopted" : "", "The date the animal was altered" : "", "The date the animal was born" : "", "The date the animal was brought into the shelter" : "", "The date the animal was heartworm tested" : "", "The date the animal was microchipped" : "", "The date the animal was reclaimed" : "", "The date the animal was tattooed" : "", "The date the foster animal will be returned if known" : "", "The date the foster is effective from" : "", "The date the litter entered the shelter" : "", "The date the owner last contacted the shelter" : "", "The date the payment was received" : "", "The date the reservation is effective from" : "", "The date the retailer movement is effective from" : "", "The date the transfer is effective from" : "", "The date the trial adoption is over" : "", "The date the vaccination is required/due to be administered" : "", "The date the vaccination was administered" : "", "The date this animal was found" : "", "The date this animal was lost" : "", "The date this animal was put on the waiting list" : "", "The date this animal was removed from the waiting list" : "", "The date this animal was reserved" : "", "The date this animal was returned to its owner" : "", "The date this person was homechecked." 
: "", "The default username is 'user' with the password 'letmein'" : "", "The entry reason for this animal" : "", "The litter this animal belongs to" : "", "The locale determines the language ASM will use when displaying text, dates and currencies." : "", "The location where the animal was picked up" : "", "The microchip number" : "", "The movement number '{0}' is not unique." : "", "The number of stock records to create" : "", "The period in days before waiting list urgency is increased" : "", "The person record to merge must be different from the original." : "", "The primary breed of this animal" : "", "The reason the owner wants to part with the animal" : "", "The reason this animal was removed from the waiting list" : "", "The remaining units in the container" : "", "The result of the FIV test" : "", "The result of the FLV test" : "", "The result of the heartworm test" : "", "The retail/resale price per unit" : "", "The secondary breed of this animal" : "", "The selected file is not an image." : "", "The shelter category for this animal" : "", "The shelter reference number" : "", "The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "", "The size of this animal" : "", "The species of this animal" : "", "The tattoo number" : "", "The type of unit in the container, eg: tablet, vial, etc." : "", "The veterinary license number." : "", "The wholesale/trade price the container was bought for" : "", "There is not enough information in the form to attach to a shelter animal record (need an animal name)." : "", "There is not enough information in the form to create a found animal record (need a description and area found)." : "", "There is not enough information in the form to create a lost animal record (need a description and area lost)." : "", "There is not enough information in the form to create a person record (need a surname)." : "", "There is not enough information in the form to create a transport record (need animalname)." 
: "", "There is not enough information in the form to create a transport record (need pickupdate and dropoffdate)." : "", "There is not enough information in the form to create a waiting list record (need a description)." : "", "There is not enough information in the form to create an incident record (need call notes and dispatch address)." : "", "These are the HTML headers and footers used when displaying online forms." : "", "These are the HTML headers and footers used when generating reports." : "", "These are the default values for these fields when creating new records." : "", "These batch processes are run each night by the system and should not need to be run manually." : "", "These fields allow you to deduct stock for the test(s) given. This single deduction should cover the selected tests being performed." : "", "These fields allow you to deduct stock for the treatment(s) given. This single deduction should cover the selected treatments being administered." : "", "These fields allow you to deduct stock for the vaccination(s) given. This single deduction should cover the selected vaccinations being administered." : "", "These fields determine which columns are shown on the find animal and find person screens." : "", "These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "", "These options change the behaviour of the search box at the top of the page." : "", "These values are required for correct operation of the system. ONLY change them if you are translating to another language." : "", "Third offence" : "", "This Month" : "", "This Week" : "", "This Year" : "", "This animal already has an active reservation." : "", "This animal has a SmartTag PETID" : "", "This animal has a tattoo" : "", "This animal has active reservations, they will be cancelled." 
: "", "This animal has an adoption fee of {0}" : "", "This animal has been FIV/L tested" : "", "This animal has been altered" : "", "This animal has been declawed" : "", "This animal has been heartworm tested" : "", "This animal has movements and cannot be removed." : "", "This animal has not been altered." : "", "This animal has not been microchipped." : "", "This animal has special needs" : "", "This animal has the same name as another animal recently added to the system." : "", "This animal is a crossbreed" : "", "This animal is bonded with {0}" : "", "This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "", "This animal is currently at a retailer, it will be automatically returned first." : "", "This animal is currently fostered, it will be automatically returned first." : "", "This animal is currently held and cannot be adopted." : "", "This animal is currently quarantined and should not leave the shelter." : "", "This animal is marked not for adoption." : "", "This animal is microchipped" : "", "This animal is not on the shelter." : "", "This animal is part of a cruelty case and should not leave the shelter." : "", "This animal should be held in case it is reclaimed" : "", "This animal should not be shown in figures and is not in the custody of the shelter" : "", "This animal was dead on arrival to the shelter" : "", "This animal was euthanized" : "", "This animal was picked up" : "", "This animal was transferred from another shelter" : "", "This code has already been used." : "", "This database is locked and in read-only mode. You cannot add, change or delete records." : "", "This database is locked." : "", "This date of birth is an estimate" : "", "This expense account is the source for costs of this type" : "", "This income account is the source for payments received of this type" : "", "This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." 
: "", "This many years after creation of a person record, the name, address and telephone data will be anonymized." : "", "This month" : "", "This movement cannot be from a retailer when the animal has no prior retailer movements." : "", "This person has an animal control incident against them" : "", "This person has an animal control incident against them." : "", "This person has been banned from adopting animals" : "", "This person has been banned from adopting animals." : "", "This person has been under investigation" : "", "This person has been under investigation." : "", "This person has movements and cannot be removed." : "", "This person has not passed a homecheck" : "", "This person has not passed a homecheck." : "", "This person has payments and cannot be removed." : "", "This person has previously surrendered an animal." : "", "This person is linked to a waiting list record and cannot be removed." : "", "This person is linked to an animal and cannot be removed." : "", "This person is linked to an investigation and cannot be removed." : "", "This person is linked to animal control and cannot be removed." : "", "This person is linked to animal licenses and cannot be removed." : "", "This person is linked to animal transportation and cannot be removed." : "", "This person is linked to citations and cannot be removed." : "", "This person is linked to found animals and cannot be removed." : "", "This person is linked to lost animals and cannot be removed." : "", "This person is linked to trap loans and cannot be removed." : "", "This person is not flagged as a fosterer and cannot foster animals." : "", "This person is not flagged as a retailer and cannot handle retailer movements." : "", "This person is very similar to another person on file, carry on creating this record?" : "", "This person lives in the same area as the person who brought the animal to the shelter." : "", "This record has been changed by another user, please reload." 
: "", "This report cannot be sent by email as it requires criteria to run." : "", "This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "", "This screen allows you to add extra images to your database, for use in reports and documents." : "", "This type of movement requires a date." : "", "This type of movement requires a person." : "", "This week" : "", "This will permanently remove the selected records, are you sure?" : "", "This will permanently remove the selected roles, are you sure?" : "", "This will permanently remove the selected user accounts. Are you sure?" : "", "This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "", "This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "", "This will permanently remove this animal, are you sure?" : "", "This will permanently remove this incident, are you sure?" : "", "This will permanently remove this person, are you sure?" : "", "This will permanently remove this record, are you sure?" : "", "This will permanently remove this waiting list entry, are you sure?" : "", "This will remove ALL rota entries for the week beginning {0}. This action is irreversible, are you sure?" 
: "", "This year" : "", "Thoroughbred" : "", "Thu" : "", "Thumbnail size" : "", "Thursday" : "", "Tibetan Mastiff" : "", "Tibetan Spaniel" : "", "Tibetan Terrier" : "", "Tiger" : "เสือ", "Time" : "", "Time Brought In" : "", "Time On List" : "", "Time On Shelter" : "", "Time on list" : "", "Time on shelter" : "", "Timeline" : "", "Timeline ({0})" : "", "Times should be in HH:MM format, eg: 09:00, 16:30" : "", "Title" : "", "Title First Last" : "", "Title Initials Last" : "", "To" : "", "To Adoption" : "", "To Fostering" : "", "To Other" : "", "To Retailer" : "", "To add people to the rota, create new person records with the staff or volunteer flag." : "", "To continue using ASM, please renew {0}" : "", "To week beginning" : "", "Today" : "", "Tonkinese" : "", "Too Many Animals" : "สัตว์ที่พบเจอ", "Tooltip" : "", "Top Margin" : "", "Tortie" : "", "Tortie and White" : "", "Tortoise" : "", "Tosa Inu" : "", "Total" : "", "Total number of units in the container" : "", "Total payments" : "", "Toucan" : "", "Toy Fox Terrier" : "", "Training" : "", "Transactions" : "", "Transactions need a date and description." : "", "Transfer" : "", "Transfer In" : "โอนย้ายเข้า", "Transfer To" : "โอนย้ายเข้า", "Transfer an animal" : "", "Transfer from Municipal Shelter" : "", "Transfer from Other Shelter" : "", "Transfer successfully created." : "", "Transfer?" : "", "Transferred" : "โอนย้ายเข้า", "Transferred From" : "โอนย้ายเข้า", "Transferred In" : "โอนย้ายเข้า", "Transferred In {0}" : "โอนย้ายเข้า", "Transferred Out" : "โอนย้ายออก", "Transferred Out {0}" : "โอนย้ายออก", "Transfers must have a valid transfer date." : "", "Transport" : "", "Transport Book" : "", "Transport Types" : "ประเภทกราฟ", "Transport book" : "", "Transport requires an animal" : "", "Transports must have valid pickup and dropoff dates and times." 
: "", "Trap Loans" : "", "Trap Number" : "", "Trap Types" : "", "Trap loan" : "", "Trap loans" : "", "Treat animals at retailers as part of the shelter inventory" : "", "Treat foster animals as part of the shelter inventory" : "", "Treat trial adoptions as part of the shelter inventory" : "", "Treatment" : "", "Treatment Given" : "", "Treatment marked as given for {0} - {1}" : "", "Treatment name cannot be blank" : "", "Treatments" : "", "Treeing Walker Coonhound" : "", "Trial Adoption" : "", "Trial adoption" : "", "Trial adoption book" : "", "Trial ends on" : "", "Tricolour" : "", "Trigger Batch Processes" : "", "Tu" : "", "Tue" : "", "Tuesday" : "", "Tumblr" : "", "Turkey" : "", "Turkish Angora" : "", "Turkish Van" : "", "Turtle" : "เต่า", "Twitter" : "", "Type" : "ประเภท", "Type of animal links to show" : "", "U (Unwanted Cat)" : "", "UK Giftaid" : "", "URL" : "", "UUUUUUUUUU or UUUU = unique number" : "", "Unable to Afford" : "", "Unable to Cope" : "", "Unaltered" : "", "Unaltered Adopted Animals" : "", "Unaltered Dog - 1 year" : "", "Unaltered Dog - 3 year" : "", "Unavailable" : "", "Under {0} weeks old" : "", "Unit" : "", "Unit Price" : "", "Unit within the location, eg: pen or cage number" : "", "Units" : "", "Unknown" : "ไม่ทราบ", "Unknown microchip brand" : "", "Unpaid Fines" : "", "Unreserved" : "", "Unsaved Changes" : "", "Unspecified" : "", "Unsuitable Accomodation" : "", "Up for adoption" : "", "Upcoming medical items" : "", "Update" : "", "Update publishing options" : "", "Update system options" : "", "Update the daily boarding cost for this animal" : "", "Updated database to version {0}" : "", "Updated." : "", "Updating..." : "", "Upload" : "", "Upload Document" : "", "Upload ODT" : "", "Upload Photo" : "", "Upload a new OpenOffice template" : "", "Upload all available images for animals" : "", "Upload an SQL script" : "", "Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "", "Uploading..." 
: "", "Urgencies" : "", "Urgency" : "", "Urgent" : "", "Usage Date" : "", "Usage Type" : "", "Usage explains why this stock record was created or adjusted. Usage records will only be created if the balance changes." : "", "Use Automatic Insurance Numbers" : "", "Use HTML5 client side image scaling where available to speed up image uploads" : "", "Use SQL Interface" : "", "Use a single breed field" : "", "Use animal comments" : "", "Use fancy tooltips" : "", "Use notes from preferred photo" : "", "Use the icon in the lower right of notes fields to view them in a separate window." : "", "User Accounts" : "", "User Roles" : "", "User accounts that will only ever call the Service API should set this to No." : "", "User roles" : "", "Username" : "", "Username '{0}' already exists" : "", "Users" : "", "Users need a username, password and at least one role or the superuser flag setting." : "", "Vacation" : "ที่ตั้ง", "Vaccinate" : "", "Vaccinate Animal" : "", "Vaccination" : "", "Vaccination Book" : "", "Vaccination Given" : "ประเภทวัคซีน", "Vaccination Types" : "ประเภทวัคซีน", "Vaccination book" : "", "Vaccination marked as given for {0} - {1}" : "", "Vaccinations" : "", "Vaccinations need an animal and at least a required date." 
: "", "Vaccinations require an animal" : "", "Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Transport: {3}, Costs: {4}, Total Costs: {5} Total Payments: {6}, Balance: {7}" : "", "Valid tokens for the subject and text" : "", "Value" : "", "Various" : "", "Vertical Pitch" : "", "Very Large" : "ใหญ่มาก", "Vet" : "", "Vet Visit" : "", "Victim" : "", "Victim Name" : "", "Video Link" : "", "Vietnamese Pot Bellied" : "", "View" : "", "View Accounts" : "", "View Animals" : "ดูสัตว์ต่างๆ", "View Audit Trail" : "", "View Citations" : "", "View Clinic Appointment" : "", "View Cost" : "", "View Diary" : "", "View Diets" : "", "View Document" : "", "View Document Repository" : "", "View Found Animal" : "สัตว์ที่พบเจอ", "View Incidents" : "", "View Incoming Forms" : "", "View Investigations" : "", "View Licenses" : "", "View Litter" : "", "View Log" : "ปูมบันทึกใหม่", "View Lost Animal" : "สัตว์ที่พบเจอ", "View Manual" : "", "View Media" : "", "View Medical Records" : "", "View Movement" : "", "View PDF" : "", "View Payments" : "", "View Person" : "", "View Person Links" : "", "View Report" : "", "View Roles" : "", "View Rota" : "", "View Shelter Animals" : "", "View Staff Person Records" : "", "View Stock" : "", "View Tests" : "", "View Training Videos" : "", "View Transport" : "", "View Trap Loans" : "", "View Vaccinations" : "", "View Volunteer Person Records" : "", "View Vouchers" : "", "View Waiting List" : "ดูรายชื่อที่กำลังรอ", "View animals matching publishing options" : "", "View littermates" : "", "View matching records" : "", "View media" : "", "View publishing logs" : "", "Visual Theme" : "", "Vizsla" : "", "Volunteer" : "", "Voucher Types" : "", "Vouchers" : "", "Vouchers need an issue and expiry date." 
: "", "WARNING: This animal has not been microchipped" : "", "WARNING: This animal is over 6 months old and has not been neutered/spayed" : "", "Waiting" : "รายชื่อที่กำลังคอย", "Waiting List" : "รายชื่อที่กำลังคอย", "Waiting List - Additional" : "", "Waiting List - Details" : "", "Waiting List - Removal" : "", "Waiting List Contact" : "", "Waiting List Donation" : "", "Waiting List {0}" : "รายชื่อที่กำลังรอ: {0}", "Waiting List: {0}" : "รายชื่อที่กำลังรอ: {0}", "Waiting Room" : "รายชื่อที่กำลังคอย", "Waiting for documents..." : "", "Waiting list donations" : "", "Waiting list entries matching '{0}'." : "", "Waiting list entries must have a contact" : "", "Waiting list entry for {0} ({1})" : "", "Waiting list entry successfully added." : "", "Waiting list urgency update period in days" : "", "Warmblood" : "", "Warn if the name of the new animal is similar to one entered recently" : "", "Warn when adopting an animal who has not been microchipped" : "", "Warn when adopting an unaltered animal" : "", "Warn when adopting to a person who has been banned from adopting animals" : "", "Warn when adopting to a person who has not been homechecked" : "", "Warn when adopting to a person who has previously brought an animal to the shelter" : "", "Warn when adopting to a person who lives in the same area as the original owner" : "", "Warn when creating multiple reservations on the same animal" : "", "Warnings" : "คำเตือน", "Wasted" : "", "Water Bills" : "", "We" : "", "Wed" : "", "Wednesday" : "", "Week" : "", "Week beginning {0}" : "", "Weekly" : "รายสัปดาห์", "Weight" : "น้ำหนัก", "Weimaraner" : "", "Welcome!" 
: "", "Welsh Corgi" : "", "Welsh Springer Spaniel" : "", "Welsh Terrier" : "", "West Highland White Terrier Westie" : "", "Wheaten Terrier" : "", "When" : "", "When ASM should stop showing this message" : "", "When I change the location of an animal, make a note of it in the log with this type" : "", "When I change the weight of an animal, make a note of it in the log with this type" : "", "When I generate a document, make a note of it in the log with this type" : "", "When I mark an animal held, make a note of it in the log with this type" : "", "When I set a new GDPR Opt-In contact option, make a note of it in the log with this type" : "", "When a message is created, email it to each matching user" : "", "When creating payments from the Move menu screens, mark them due instead of received" : "", "When displaying calendars, the first day of the week is" : "", "When displaying person names, use the format" : "", "When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "", "When entering vaccinations, default the last batch number and manufacturer for that type" : "", "When matching lost animals, include shelter animals" : "", "When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "", "When receiving multiple payments, allow the due and received dates to be set" : "", "When receiving payments, allow a quantity and unit price to be set" : "", "When receiving payments, allow recording of sales tax with a default rate of" : "", "When receiving payments, allow the deposit account to be overridden" : "", "When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "", "When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." 
: "", "Where this animal is located within the shelter" : "", "Whippet" : "", "White" : "ขาว", "White German Shepherd" : "", "White and Black" : "ขาวและดำ", "White and Brindle" : "", "White and Brown" : "ขาวและน้ำตาล", "White and Grey" : "ขาวและเทา", "White and Liver" : "", "White and Tabby" : "", "White and Tan" : "", "White and Torti" : "", "Will this owner give a donation?" : "", "Wire-haired Pointing Griffon" : "", "Wirehaired Terrier" : "", "With Vet" : "", "With overnight batch" : "", "Withdrawal" : "", "Wk" : "", "Work" : "", "Work Phone" : "", "Work Types" : "", "XXX or XX = number unique for this year" : "", "Xoloitzcuintle/Mexican Hairless" : "", "YY or YYYY = current year" : "", "Yellow Labrador Retriever" : "", "Yellow and Grey" : "เหลืองและเทา", "Yes" : "ใช่", "Yes/No" : "ใช่/ไม่", "Yes/No/Unknown" : "", "Yorkshire Terrier Yorkie" : "", "You can bookmark search results, animals, people and most data entry screens." : "", "You can drag and drop animals in shelter view to change their locations." : "", "You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "", "You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "", "You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "", "You can set a default amount for different payment types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "", "You can sort tables by clicking on the column headings." : "", "You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "", "You can use incoming forms to create new records or attach them to existing records." 
: "", "You can't have a return without a movement." : "", "You didn't specify any search criteria, so an on-shelter search was assumed." : "", "You have unsaved changes, are you sure you want to leave this page?" : "", "You must supply a code." : "", "Young Adult" : "", "Your CSV file should have a header row with field names ASM recognises." : "", "Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "", "Zipcode" : "", "Zipcode contains" : "", "[None]" : "[ไม่มี]", "after connecting, chdir to" : "", "and" : "", "are sent to" : "", "at" : "", "cm" : "", "days" : "วัน", "estimate" : "", "filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "", "inches" : "", "invalid" : "", "kg" : "", "lb" : "", "less" : "", "mins" : "", "months" : "เดือน", "more" : "", "on" : "", "or" : "", "or estimated age in years" : "", "oz" : "", "to" : "", "today" : "", "treatments" : "", "treatments, every" : "", "weekdays" : "", "weeks" : "สัปดาห์", "weeks after last contact." : "", "years" : "ปี", "yesterday" : "", "{0} (under {1} months)" : "", "{0} - {1} ({2} {3} aged {4})" : "", "{0} - {1} {2}" : "", "{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "", "{0} animals successfully updated." : "", "{0} cannot be blank" : "", "{0} fine, paid" : "", "{0} fine, unpaid" : "", "{0} incurred in costs" : "", "{0} is running ({1}&#37; complete)." : "", "{0} payment records created." : "", "{0} received" : "", "{0} record(s) match the mail merge." : "", "{0} results." : "", "{0} rows affected." 
: "", "{0} selected" : "", "{0} treatments every {1} days" : "", "{0} treatments every {1} months" : "", "{0} treatments every {1} weekdays" : "", "{0} treatments every {1} weeks" : "", "{0} treatments every {1} years" : "", "{0} {1} ({2} treatments)" : "", "{0} {1} aged {2}" : "", "{0} {1} {2} aged {3}" : "", "{0} {1}: Moved from {2} to {3}" : "", "{0} {1}: adopted by {2}" : "", "{0} {1}: altered" : "", "{0} {1}: available for adoption" : "", "{0} {1}: died ({2})" : "", "{0} {1}: entered the shelter" : "", "{0} {1}: escaped" : "", "{0} {1}: euthanised ({2})" : "", "{0} {1}: fostered to {2}" : "", "{0} {1}: held" : "", "{0} {1}: microchipped" : "", "{0} {1}: not available for adoption" : "", "{0} {1}: quarantined" : "", "{0} {1}: received {2}" : "", "{0} {1}: reclaimed by {2}" : "", "{0} {1}: released" : "", "{0} {1}: reserved by {2}" : "", "{0} {1}: returned by {2}" : "", "{0} {1}: sent to retailer {2}" : "", "{0} {1}: stolen" : "", "{0} {1}: tested positive for FIV" : "", "{0} {1}: tested positive for FeLV" : "", "{0} {1}: tested positive for Heartworm" : "", "{0} {1}: transferred to {2}" : "", "{0}, Week {1}" : "", "{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "", "{0}: closed {1} ({2})" : "", "{0}: opened {1}" : "", "{0}: waiting list - {1}" : "", "{0}: {1} {2} - {3} {4}" : "", "{2}: found in {1}: {0}" : "", "{2}: lost in {1}: {0}" : "", "{plural0} animal as dead on arrival" : "", "{plural0} animal control call due for followup today" : "", "{plural0} animal died" : "", "{plural0} animal entered the shelter" : "", "{plural0} animal has a hold ending today" : "", "{plural0} animal has been on the shelter longer than {0} months" : "", "{plural0} animal is not available for adoption" : "", "{plural0} animal was adopted" : "", "{plural0} animal was euthanized" : "", "{plural0} animal was reclaimed by its owner" : "", "{plural0} animal was transferred to another shelter" : "", "{plural0} day." 
: "", "{plural0} incomplete animal control call" : "", "{plural0} item of stock expires in the next month" : "", "{plural0} item of stock has expired" : "", "{plural0} medical treatment needs to be administered today" : "", "{plural0} month." : "", "{plural0} new online form submission" : "", "{plural0} person has an overdue payment" : "", "{plural0} person with an active reservation has not been homechecked" : "", "{plural0} potential match for a lost animal" : "", "{plural0} recent publisher run had errors" : "", "{plural0} reservation has been active over a week without adoption" : "", "{plural0} result found in {1} seconds. Order: {2}" : "", "{plural0} shelter animal has not been microchipped" : "", "{plural0} shelter animal has people looking for them" : "", "{plural0} test needs to be performed today" : "", "{plural0} transport does not have a driver assigned" : "", "{plural0} trap is overdue for return" : "", "{plural0} trial adoption has ended" : "", "{plural0} unaltered animal has been adopted in the last month" : "", "{plural0} undispatched animal control call" : "", "{plural0} unpaid fine" : "", "{plural0} urgent entry on the waiting list" : "", "{plural0} vaccination has expired" : "", "{plural0} vaccination needs to be administered today" : "", "{plural0} week." : "", "{plural0} year." : "", "{plural1} animal control calls due for followup today" : "", "{plural1} animals are not available for adoption" : "", "{plural1} animals died" : "", "{plural1} animals entered the shelter" : "", "{plural1} animals have been on the shelter longer than {0} months" : "", "{plural1} animals have holds ending today" : "", "{plural1} animals were adopted" : "", "{plural1} animals were dead on arrival" : "", "{plural1} animals were euthanized" : "", "{plural1} animals were reclaimed by their owners" : "", "{plural1} animals were transferred to other shelters" : "", "{plural1} days." 
: "", "{plural1} incomplete animal control calls" : "", "{plural1} items of stock expire in the next month" : "", "{plural1} items of stock have expired" : "", "{plural1} medical treatments need to be administered today" : "", "{plural1} months." : "", "{plural1} new online form submissions" : "", "{plural1} people have overdue payments" : "", "{plural1} people with active reservations have not been homechecked" : "", "{plural1} potential matches for lost animals" : "", "{plural1} recent publisher runs had errors" : "", "{plural1} reservations have been active over a week without adoption" : "", "{plural1} results found in {1} seconds. Order: {2}" : "", "{plural1} shelter animals have not been microchipped" : "", "{plural1} shelter animals have people looking for them" : "", "{plural1} tests need to be performed today" : "", "{plural1} transports do not have a driver assigned" : "", "{plural1} traps are overdue for return" : "", "{plural1} trial adoptions have ended" : "", "{plural1} unaltered animals have been adopted in the last month" : "", "{plural1} undispatched animal control calls" : "", "{plural1} unpaid fines" : "", "{plural1} urgent entries on the waiting list" : "", "{plural1} vaccinations have expired" : "", "{plural1} vaccinations need to be administered today" : "", "{plural1} weeks." : "", "{plural1} years." : "", "{plural2} animal control calls due for followup today" : "", "{plural2} animals are not available for adoption" : "", "{plural2} animals died" : "", "{plural2} animals entered the shelter" : "", "{plural2} animals have been on the shelter longer than {0} months" : "", "{plural2} animals have holds ending today" : "", "{plural2} animals were adopted" : "", "{plural2} animals were dead on arrival" : "", "{plural2} animals were euthanized" : "", "{plural2} animals were reclaimed by their owners" : "", "{plural2} animals were transferred to other shelters" : "", "{plural2} days." 
: "", "{plural2} incomplete animal control calls" : "", "{plural2} items of stock expire in the next month" : "", "{plural2} items of stock have expired" : "", "{plural2} medical treatments need to be administered today" : "", "{plural2} months." : "", "{plural2} new online form submissions" : "", "{plural2} people have overdue payments" : "", "{plural2} people with active reservations have not been homechecked" : "", "{plural2} potential matches for lost animals" : "", "{plural2} recent publisher runs had errors" : "", "{plural2} reservations have been active over a week without adoption" : "", "{plural2} results found in {1} seconds. Order: {2}" : "", "{plural2} shelter animals have not been microchipped" : "", "{plural2} shelter animals have people looking for them" : "", "{plural2} tests need to be performed today" : "", "{plural2} transports do not have a driver assigned" : "", "{plural2} traps are overdue for return" : "", "{plural2} trial adoptions have ended" : "", "{plural2} unaltered animals have been adopted in the last month" : "", "{plural2} undispatched animal control calls" : "", "{plural2} unpaid fines" : "", "{plural2} urgent entries on the waiting list" : "", "{plural2} vaccinations have expired" : "", "{plural2} vaccinations need to be administered today" : "", "{plural2} weeks." : "", "{plural2} years." : "", "{plural3} animal control calls due for followup today" : "", "{plural3} animals are not available for adoption" : "", "{plural3} animals died" : "", "{plural3} animals entered the shelter" : "", "{plural3} animals have been on the shelter longer than {0} months" : "", "{plural3} animals have holds ending today" : "", "{plural3} animals were adopted" : "", "{plural3} animals were dead on arrival" : "", "{plural3} animals were euthanized" : "", "{plural3} animals were reclaimed by their owners" : "", "{plural3} animals were transferred to other shelters" : "", "{plural3} days." 
: "", "{plural3} incomplete animal control calls" : "", "{plural3} items of stock expire in the next month" : "", "{plural3} items of stock have expired" : "", "{plural3} medical treatments need to be administered today" : "", "{plural3} months." : "", "{plural3} new online form submissions" : "", "{plural3} people have overdue payments" : "", "{plural3} people with active reservations have not been homechecked" : "", "{plural3} potential matches for lost animals" : "", "{plural3} recent publisher runs had errors" : "", "{plural3} reservations have been active over a week without adoption" : "", "{plural3} results found in {1} seconds. Order: {2}" : "", "{plural3} shelter animals have not been microchipped" : "", "{plural3} shelter animals have people looking for them" : "", "{plural3} tests need to be performed today" : "", "{plural3} transports do not have a driver assigned" : "", "{plural3} traps are overdue for return" : "", "{plural3} trial adoptions have ended" : "", "{plural3} unaltered animals have been adopted in the last month" : "", "{plural3} undispatched animal control calls" : "", "{plural3} unpaid fines" : "", "{plural3} urgent entries on the waiting list" : "", "{plural3} vaccinations have expired" : "", "{plural3} vaccinations need to be administered today" : "", "{plural3} weeks." : "", "{plural3} years." : "" }
bobintetley/asm3
src/asm3/locales/locale_th.py
Python
gpl-3.0
116,103
[ "Amber", "VisIt" ]
a10357f26361f3d3f870deea58cc282789eace3579befc40bf7a695845c19fe8
#!/usr/bin/env """ GOA_Winds_StormPatterns.py Compare Gorepoint/globec winds (along shore) to telleconnection indices GorePoint - 58deg 58min N, 150deg 56min W and Globec3 59.273701N, 148.9653W Files are created by GOA_Winds_NARR_model_prep.py -Filtered NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs -Provided U, V -Saved in EPIC NetCDF standard """ #System Stack import datetime import sys #Science Stack import numpy as np from scipy import stats # User Stack from utilities import ncutilities as ncutil # Visual Stack import matplotlib.pyplot as plt from matplotlib.dates import DateFormatter, MonthLocator __author__ = 'Shaun Bell' __email__ = 'shaun.bell@noaa.gov' __created__ = datetime.datetime(2014, 04, 29) __modified__ = datetime.datetime(2014, 04, 29) __version__ = "0.1.0" __status__ = "Development" __keywords__ = 'NARR','GLOBEC3', 'Gorept','AO/NAO/PNA', 'U,V','Winds', 'Gulf of Alaska' """------------------------General Modules-------------------------------------------""" def from_netcdf(infile): """ Uses ncreadfile_dic which returns a dictionary of all data from netcdf""" ###nc readin/out nchandle = ncutil.ncopen(infile) params = ncutil.get_vars(nchandle) #gets all of them ncdata = ncutil.ncreadfile_dic(nchandle, params) ncutil.ncclose(nchandle) return (ncdata, params) def date2pydate(file_time, file_time2=None, file_flag='EPIC'): """ Ingest EPIC date or NCEP Date and provide python serial date""" if file_flag == 'EPIC': ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23)) ref_time_epic = 2440000 offset = ref_time_epic - ref_time_py try: #if input is an array python_time = [None] * len(file_time) for i, val in enumerate(file_time): pyday = file_time[i] - offset pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day python_time[i] = (pyday + pyfrac) except: pyday = file_time - offset pyfrac = file_time2 / (1000. * 60. * 60.* 24.) 
#milliseconds in a day python_time = (pyday + pyfrac) elif file_flag == 'NARR': """ Hours since 1800-1-1""" base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal() python_time = file_time / 24. + base_date elif file_flag == 'NCEP': """ Hours since 1800-1-1""" base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal() python_time = file_time / 24. + base_date elif file_flag == 'Index': """ yyyy mm dd""" python_time=datetime.datetime.strptime(file_time,'%Y %m %d').toordinal() else: print "time flag not recognized" sys.exit() return python_time """------------------------- MATH Modules -------------------------------------------""" def hourly_2_ave(ltbound,utbound, time, data, time_base=6.): """ bin average times into specified bins """ interval = time_base / 24. tarray = np.arange(ltbound, utbound,interval) dmean = np.zeros_like(tarray) * np.nan dstd = np.zeros_like(tarray) * np.nan for i, val in enumerate(tarray): ind = (time >= val) & (time < val+interval) dmean[i] = data[ind].mean() dstd[i] = data[ind].std() return { 'dtime':tarray, 'dmean':dmean ,'dstd':dstd,} def rotate_coord(angle_rot, mag, direct): """ converts math coords to along/cross shelf. + onshore / along coast with land to right (right handed) - offshore / along coast with land to left Todo: convert met standard for winds (left handed coordinate system """ direct = direct - angle_rot along = mag * np.sin(np.deg2rad(direct)) cross = mag * np.cos(np.deg2rad(direct)) return (along, cross) def lin_fit(x, y): """ scipy linear regression routine""" slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) return ( slope, intercept, r_value, p_value, std_err ) def moving_average(x, n, type='simple'): """ compute an n period moving average. 
type is 'simple' | 'exponential' """ #x = np.asarray(x) if type=='simple': weights = np.ones(n) else: weights = np.exp(np.linspace(-1., 0., n)) weights /= weights.sum() a = np.convolve(x, weights, mode='full')[:len(x)] a[:n] = a[n] return a """------------------------- Main Modules -------------------------------------------""" ### READ AO/PNA indices from txt file AO_file = '/Users/bell/Data_Local/teleconnections/norm.daily.ao.index.b500101.current.ascii' PNA_file = '/Users/bell/Data_Local/teleconnections/norm.daily.pna.index.b500101.current.ascii' # ingest indicies PNA_index, PNA_time = [], [] #some missing ind with open(PNA_file, 'rb') as f: for k, line in enumerate(f.readlines()): PNA_index = PNA_index + [line.strip().split()[-1]] PNA_time = PNA_time + [date2pydate(" ".join(line.strip().split()[:-1]), file_flag='Index')] PNA_index= np.array(PNA_index, float) PNA_time= np.array(PNA_time) AO_index, AO_time = [], [] #some missing ind with open(AO_file, 'rb') as f: for k, line in enumerate(f.readlines()): AO_index = AO_index + [line.strip().split()[-1]] AO_time = AO_time + [date2pydate(" ".join(line.strip().split()[:-1]), file_flag='Index')] AO_index= np.array(AO_index, float) AO_time= np.array(AO_time) ### NARR wind files (preprocessed) for specific locations - winds have a triangle filter on them NARR = '/Users/bell/Programs/Python/FOCI_Analysis/GOA_Winds/data/' station_name = ['Globec3','GorePt'] sta_lat = [59.273701,58.9666666666666667] sta_long = [148.9653,150.9333333333333333] #loop over all requested data years = range(1984, 2014,1) NARR_time = [] NARR_uwnd = [] NARR_vwnd = [] for iyear in years: globec3_data, NARRkeys = from_netcdf(NARR+'NARR_globec_'+str(iyear)+'.nc') NARR_time = NARR_time + date2pydate(globec3_data['time'], globec3_data['time2']) NARR_uwnd = np.append(NARR_uwnd, globec3_data['WU_422'][:,0,0,0]) NARR_vwnd = np.append(NARR_vwnd, globec3_data['WV_423'][:,0,0,0]) NARR_time = np.array(NARR_time) ### daily averages time_bin = 24. 
NARRDaily_uwnd = hourly_2_ave(NARR_time.min(),NARR_time.max(), NARR_time, NARR_uwnd, time_base=time_bin) NARRDaily_vwnd = hourly_2_ave(NARR_time.min(),NARR_time.max(), NARR_time, NARR_vwnd, time_base=time_bin) NARR_wndmag = np.sqrt((NARRDaily_uwnd['dmean']**2)+(NARRDaily_vwnd['dmean']**2)) NARR_wind_dir_math = np.rad2deg(np.arctan2(NARRDaily_vwnd['dmean'] , NARRDaily_uwnd['dmean'])) NARR_along,NARR_across = rotate_coord(120., NARR_wndmag, NARR_wind_dir_math) """----""" # Calculate correlations for 3month spans corr_PNA = {} corr_AO = {} for drange in range(1980,2014,1): for mrange in range(1,12,3): start_ind = datetime.datetime.strptime(str(drange) + ' ' + str(mrange) + ' 01','%Y %m %d').toordinal() end_ind = datetime.datetime.strptime(str(drange) + ' ' + str(mrange+2) + ' 01','%Y %m %d').toordinal() PNA_ind = (PNA_time >= start_ind) & (PNA_time <= end_ind) AO_ind = (AO_time >= start_ind) & (AO_time <= end_ind) NARR_ind = (NARRDaily_uwnd['dtime'] >= start_ind) & (NARRDaily_uwnd['dtime'] <= end_ind) # NARR_along_stand = (NARR_along - np.nanmean(NARR_along)) / np.nanstd(NARR_along) # NARR_along_stand = (NARR_along - np.nanmin(NARR_along)) / (np.nanmax(NARR_along) - np.nanmin(NARR_along)) #actually normalized - ignor var name # PNA_index_stand = (PNA_index - np.nanmean(PNA_index)) / (np.nanstd(PNA_index)) # PNA_index_stand = (PNA_index - np.nanmin(PNA_index)) / (np.nanmax(PNA_index) - np.nanmin(PNA_index)) #actually normalized - ignor var name if not np.size(NARR_along[NARR_ind]) == 0: #(slope, intercept, r_value, p_value, std_err) = lin_fit(NARR_along_stand[NARR_ind], PNA_index[PNA_ind]) #corr[start_ind] = r_value**2 corr_PNA[start_ind] = np.corrcoef(NARR_along[NARR_ind], PNA_index[PNA_ind])[0][1] corr_AO[start_ind] = np.corrcoef(NARR_along[NARR_ind], AO_index[PNA_ind])[0][1] else: corr_PNA[start_ind] = 0.0 corr_AO[start_ind] = 0.0 # 30day running filter for wind timeseries NARR_along_rm = moving_average(NARR_along,(30)) AO_index_rm = moving_average(AO_index,(30)) 
PNA_index_rm = moving_average(PNA_index,(30)) """------------------------- Plotting Modules -------------------------------------------""" year_bounds = [[datetime.datetime.strptime('1980 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('1985 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('1990 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('1995 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('2000 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('2005 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('2010 01 01','%Y %m %d').toordinal()], [datetime.datetime.strptime('1985 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('1990 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('1995 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('2000 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('2005 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('2010 01 01','%Y %m %d').toordinal(), datetime.datetime.strptime('2015 01 01','%Y %m %d').toordinal()]] fig = plt.figure() for splot in range(0,7,1): ax1 = plt.subplot(7,1,splot+1) plt.plot(NARRDaily_uwnd['dtime'], NARR_along_rm, 'r') for i,kk in enumerate(corr_PNA.keys()): if (corr_PNA[kk]) >=.2 and (corr_PNA[kk]) <=.3: fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.1) elif (corr_PNA[kk]) >=.3 and (corr_PNA[kk]) <=.4: fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.3) elif (corr_PNA[kk]) >=.4 and (corr_PNA[kk]) <=.5: fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.5) elif (corr_PNA[kk]) >=.5: fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.8) elif (corr_PNA[kk]) <=-0.2 and (corr_PNA[kk]) >=-0.3: fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.1) elif (corr_PNA[kk]) <=-0.3 and (corr_PNA[kk]) >=-0.4: fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.3) elif (corr_PNA[kk]) <=-0.4 and (corr_PNA[kk]) >=-0.5: fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.5) elif (corr_PNA[kk]) <=-0.5: fill1 = ax1.axvspan(kk, 
kk+90, color='r', alpha=0.8) ax1.set_ylim((-10,10)) ax2 = ax1.twinx() plt.plot(PNA_time, PNA_index_rm, 'b') ax2.xaxis.set_major_formatter(DateFormatter('%b %Y')) ax2.set_ylim((-3,3)) ax2.set_xlim(year_bounds[0][splot],year_bounds[1][splot]) ax2.xaxis.set_major_locator(MonthLocator(bymonth=[3,10], bymonthday=1)) fig.suptitle('NARR Along-Shore Winds corr PNA Index at Globec3') DefaultSize = fig.get_size_inches() fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]*2) ) plt.savefig('NARR_along_PNA_globec.png', bbox_inches='tight', dpi = (100)) plt.close() fig = plt.figure() for splot in range(0,7,1): ax1 = plt.subplot(7,1,splot+1) plt.plot(NARRDaily_uwnd['dtime'], NARR_along_rm, 'r') for i,kk in enumerate(corr_AO.keys()): if (corr_AO[kk]) >=.2 and (corr_AO[kk]) <=.3: fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.1) elif (corr_AO[kk]) >=.3 and (corr_AO[kk]) <=.4: fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.3) elif (corr_AO[kk]) >=.4 and (corr_AO[kk]) <=.5: fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.5) elif (corr_AO[kk]) >=.5: fill1 = ax1.axvspan(kk, kk+90, color='k', alpha=0.8) elif (corr_AO[kk]) <=-0.2 and (corr_AO[kk]) >=-0.3: fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.1) elif (corr_AO[kk]) <=-0.3 and (corr_AO[kk]) >=-0.4: fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.3) elif (corr_AO[kk]) <=-0.4 and (corr_AO[kk]) >=-0.5: fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.5) elif (corr_AO[kk]) <=-0.5: fill1 = ax1.axvspan(kk, kk+90, color='r', alpha=0.8) ax1.set_ylim((-10,10)) ax2 = ax1.twinx() plt.plot(AO_time, AO_index_rm, 'b') ax2.xaxis.set_major_formatter(DateFormatter('%b %Y')) ax2.set_ylim((-3,3)) ax2.set_xlim(year_bounds[0][splot],year_bounds[1][splot]) ax2.xaxis.set_major_locator(MonthLocator(bymonth=[3,10], bymonthday=1)) fig.suptitle('NARR Along-Shore Winds corr AO Index at Globec3') DefaultSize = fig.get_size_inches() fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]*2) ) plt.savefig('NARR_along_AO_globec.png', 
bbox_inches='tight', dpi = (100)) plt.close()
shaunwbell/FOCI_Analysis
ReanalysisRetreival_orig/GOA_Winds/GOA_Winds_StormPatterns.py
Python
mit
13,331
[ "NetCDF" ]
18d9619d28cd7286277d7c7035f8e04ffc634c8ae599237827b86ebe7e21b4b3
# $HeadURL$ __RCSID__ = "$Id$" import threading class Synchronizer: """ Class encapsulating a lock allowing it to be used as a synchronizing decorator making the call thread-safe""" def __init__( self, lockName = "", recursive = False ): from DIRAC.Core.Utilities.LockRing import LockRing self.__lockName = lockName self.__lr = LockRing() self.__lock = self.__lr.getLock( lockName, recursive = recursive ) def __call__( self, funcToCall ): def lockedFunc( *args, **kwargs ): try: if self.__lockName: print "LOCKING", self.__lockName self.__lock.acquire() return funcToCall(*args, **kwargs) finally: if self.__lockName: print "UNLOCKING", self.__lockName self.__lock.release() return lockedFunc def lock(self): return self.__lock.acquire() def unlock(self): return self.__lock.release() #FIXME: not used class WORM: """ Write One - Read Many """ def __init__( self, maxReads = 10 ): from DIRAC.Core.Utilities.LockRing import LockRing self.__lr = LockRing() self.__lock = self.__lr.getLock() self.__maxReads = maxReads self.__semaphore = threading.Semaphore( maxReads ) def write( self, funcToCall ): """ Write decorator """ def __doWriteLock( *args, **kwargs ): try: self.__startWriteZone() return funcToCall(*args, **kwargs) finally: self.__endWriteZone() return __doWriteLock def read( self, funcToCall ): """ Read decorator """ def __doReadLock( *args, **kwargs ): try: self.__startReadZone() return funcToCall(*args, **kwargs) finally: self.__endReadZone() return __doReadLock def __startWriteZone(self): """ Locks Event to prevent further threads from reading. Stops current thread until no other thread is accessing. PRIVATE USE """ self.__lock.acquire() for i in range( self.__maxReads ): self.__semaphore.acquire() self.__lock.release() def __endWriteZone(self): """ Unlocks Event. PRIVATE USE """ for i in range( self.__maxReads ): self.__semaphore.release() def __startReadZone(self): """ Start of danger zone. This danger zone may be or may not be a mutual exclusion zone. 
Counter is maintained to know how many threads are inside and be able to enable and disable mutual exclusion. PRIVATE USE """ self.__semaphore.acquire() def __endReadZone( self ): """ End of danger zone. PRIVATE USE """ self.__semaphore.release()
Sbalbp/DIRAC
Core/Utilities/ThreadSafe.py
Python
gpl-3.0
2,622
[ "DIRAC" ]
26b59175bdc992f0100de8eae9153cddcb30255e9384618178e1a6f9d5c0a29e
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from argcomplete.completers import FilesCompleter from knack.arguments import CLIArgumentType from azure.cli.core.commands.parameters import (resource_group_name_type, get_location_type, get_resource_name_completion_list, file_type, get_three_state_flag, get_enum_type, tags_type) from azure.cli.core.util import get_file_json from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction from azure.cli.command_modules.appservice._appservice_utils import MSI_LOCAL_ID from azure.mgmt.web.models import DatabaseType, ConnectionStringType, BuiltInAuthenticationProvider, AzureStorageType from ._completers import get_hostname_completion_list from ._constants import FUNCTIONS_VERSIONS, FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS from ._validators import (validate_timeout_value, validate_site_create, validate_asp_create, validate_add_vnet, validate_front_end_scale_factor, validate_ase_create, validate_ip_address, validate_service_tag, validate_public_cloud) AUTH_TYPES = { 'AllowAnonymous': 'na', 'LoginWithAzureActiveDirectory': BuiltInAuthenticationProvider.azure_active_directory, 'LoginWithFacebook': BuiltInAuthenticationProvider.facebook, 'LoginWithGoogle': BuiltInAuthenticationProvider.google, 'LoginWithMicrosoftAccount': BuiltInAuthenticationProvider.microsoft_account, 'LoginWithTwitter': BuiltInAuthenticationProvider.twitter} MULTI_CONTAINER_TYPES = ['COMPOSE', 'KUBE'] FTPS_STATE_TYPES = ['AllAllowed', 'FtpsOnly', 'Disabled'] OS_TYPES = ['Windows', 'Linux'] LINUX_RUNTIMES = ['dotnet', 'node', 'python', 'java'] WINDOWS_RUNTIMES = ['dotnet', 'node', 'java', 'powershell'] 
ACCESS_RESTRICTION_ACTION_TYPES = ['Allow', 'Deny'] ASE_LOADBALANCER_MODES = ['Internal', 'External'] ASE_KINDS = ['ASEv2', 'ASEv3'] ASE_OS_PREFERENCE_TYPES = ['Windows', 'Linux'] # pylint: disable=too-many-statements, too-many-lines def load_arguments(self, _): # pylint: disable=line-too-long # PARAMETER REGISTRATION name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') sku_arg_type = CLIArgumentType( help='The pricing tiers, e.g., F1(Free), D1(Shared), B1(Basic Small), B2(Basic Medium), B3(Basic Large), S1(Standard Small), P1V2(Premium V2 Small), P1V3(Premium V3 Small), P2V3(Premium V3 Medium), P3V3(Premium V3 Large), PC2 (Premium Container Small), PC3 (Premium Container Medium), PC4 (Premium Container Large), I1 (Isolated Small), I2 (Isolated Medium), I3 (Isolated Large), I1v2 (Isolated V2 Small), I2v2 (Isolated V2 Medium), I3v2 (Isolated V2 Large), WS1 (Logic Apps Workflow Standard 1), WS2 (Logic Apps Workflow Standard 2), WS3 (Logic Apps Workflow Standard 3)', arg_type=get_enum_type( ['F1', 'FREE', 'D1', 'SHARED', 'B1', 'B2', 'B3', 'S1', 'S2', 'S3', 'P1V2', 'P2V2', 'P3V2', 'P1V3', 'P2V3', 'P3V3', 'PC2', 'PC3', 'PC4', 'I1', 'I2', 'I3', 'I1v2', 'I2v2', 'I3v2', 'WS1', 'WS2', 'WS3'])) webapp_name_arg_type = CLIArgumentType(configured_default='web', options_list=['--name', '-n'], metavar='NAME', completer=get_resource_name_completion_list('Microsoft.Web/sites'), id_part='name', help="name of the web app. If left unspecified, a name will be randomly generated. 
You can configure the default using `az configure --defaults web=<name>`", local_context_attribute=LocalContextAttribute(name='web_name', actions=[ LocalContextAction.GET])) functionapp_name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME', help="name of the function app.", local_context_attribute=LocalContextAttribute(name='functionapp_name', actions=[ LocalContextAction.GET])) logicapp_name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME', help="name of the logic app.", local_context_attribute=LocalContextAttribute(name='logicapp_name', actions=[LocalContextAction.GET])) name_arg_type_dict = { 'functionapp': functionapp_name_arg_type, 'logicapp': logicapp_name_arg_type } isolated_sku_arg_type = CLIArgumentType( help='The Isolated pricing tiers, e.g., I1 (Isolated Small), I2 (Isolated Medium), I3 (Isolated Large)', arg_type=get_enum_type(['I1', 'I2', 'I3'])) static_web_app_sku_arg_type = CLIArgumentType( help='The pricing tiers for Static Web App', arg_type=get_enum_type(['Free', 'Standard']) ) functionapp_runtime_strings, functionapp_runtime_to_version_strings = _get_functionapp_runtime_versions() # use this hidden arg to give a command the right instance, that functionapp commands # work on function app and webapp ones work on web app with self.argument_context('webapp') as c: c.ignore('app_instance') c.argument('resource_group_name', arg_type=resource_group_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. 
Default to the productions slot if not specified") c.argument('name', arg_type=webapp_name_arg_type) with self.argument_context('appservice') as c: c.argument('resource_group_name', arg_type=resource_group_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx)) with self.argument_context('appservice list-locations') as c: c.argument('linux_workers_enabled', action='store_true', help='get regions which support hosting web apps on Linux workers') c.argument('sku', arg_type=sku_arg_type) with self.argument_context('appservice plan') as c: c.argument('name', arg_type=name_arg_type, help='The name of the app service plan', completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'), configured_default='appserviceplan', id_part='name', local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET])) c.argument('admin_site_name', help='The name of the admin web app.', deprecate_info=c.deprecate(expiration='0.2.17')) c.ignore('max_burst') with self.argument_context('appservice plan create') as c: c.argument('name', options_list=['--name', '-n'], help="Name of the new app service plan", completer=None, validator=validate_asp_create, local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.SET], scopes=['appservice', 'webapp', 'functionapp'])) c.argument('number_of_workers', help='Number of workers to be allocated.', type=int, default=1) c.argument('app_service_environment', options_list=['--app-service-environment', '-e'], help="Name or ID of the app service environment", local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET])) c.argument('sku', arg_type=sku_arg_type) c.argument('is_linux', action='store_true', required=False, help='host web app on Linux worker') c.argument('hyper_v', action='store_true', required=False, help='Host web app on Windows container') c.argument('per_site_scaling', action='store_true', required=False, 
help='Enable per-app scaling at the ' 'App Service plan level to allow for ' 'scaling an app independently from ' 'the App Service plan that hosts it.') c.argument('zone_redundant', options_list=['--zone-redundant', '-z'], help='Enable zone redundancy for high availability. Cannot be changed after plan creation. Minimum instance count is 3.') c.argument('tags', arg_type=tags_type) with self.argument_context('appservice plan update') as c: c.argument('sku', arg_type=sku_arg_type) c.argument('elastic_scale', arg_type=get_three_state_flag(), is_preview=True, help='Enable or disable automatic scaling. Set to "true" to enable elastic scale for this plan, or "false" to disable elastic scale for this plan. The SKU must be a Premium V2 SKU (P1V2, P2V2, P3V2) or a Premium V3 SKU (P1V3, P2V3, P3V3)') c.argument('max_elastic_worker_count', options_list=['--max-elastic-worker-count', '-m'], type=int, is_preview=True, help='Maximum number of instances that the plan can scale out to. The plan must be an elastic scale plan.') c.argument('number_of_workers', type=int, help='Number of workers to be allocated.') c.ignore('allow_pending_state') with self.argument_context('appservice plan delete') as c: c.argument('name', arg_type=name_arg_type, help='The name of the app service plan', completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'), configured_default='appserviceplan', id_part='name', local_context_attribute=None) with self.argument_context('webapp create') as c: c.argument('name', options_list=['--name', '-n'], help='name of the new web app', validator=validate_site_create, local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.SET], scopes=['webapp', 'cupertino'])) c.argument('startup_file', help="Linux only. 
The web's startup file") c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-s'], help='the container registry server username') c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'], help='The container registry server password. Required for private registries.') c.argument('multicontainer_config_type', options_list=['--multicontainer-config-type'], help="Linux only.", arg_type=get_enum_type(MULTI_CONTAINER_TYPES)) c.argument('multicontainer_config_file', options_list=['--multicontainer-config-file'], help="Linux only. Config file for multicontainer apps. (local or remote)") c.argument('runtime', options_list=['--runtime', '-r'], help="canonicalized web runtime in the format of Framework|Version, e.g. \"PHP|7.2\". Allowed delimiters: \"|\" or \":\". " "Use `az webapp list-runtimes` for available list") # TODO ADD completer c.argument('plan', options_list=['--plan', '-p'], configured_default='appserviceplan', completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'), help="name or resource id of the app service plan. Use 'appservice plan create' to get one", local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET])) c.argument('vnet', help="Name or resource ID of the regional virtual network. If there are multiple vnets of the same name across different resource groups, use vnet resource id to specify which vnet to use. If vnet name is used, by default, the vnet in the same resource group as the webapp will be used. Must be used with --subnet argument.") c.argument('subnet', help="Name or resource ID of the pre-existing subnet to have the webapp join. 
The --vnet is argument also needed if specifying subnet by name.") c.ignore('language') c.ignore('using_webapp_up') with self.argument_context('webapp show') as c: c.argument('name', arg_type=webapp_name_arg_type) with self.argument_context('webapp list-instances') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) c.argument('slot', options_list=['--slot', '-s'], help='Name of the web app slot. Default to the productions slot if not specified.') with self.argument_context('webapp list-runtimes') as c: c.argument('linux', action='store_true', help='list runtime stacks for linux based web apps') with self.argument_context('webapp deleted list') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) c.argument('slot', options_list=['--slot', '-s'], help='Name of the deleted web app slot.') with self.argument_context('webapp deleted restore') as c: c.argument('deleted_id', options_list=['--deleted-id'], help='Resource ID of the deleted web app') c.argument('name', options_list=['--name', '-n'], help='name of the web app to restore the deleted content to') c.argument('slot', options_list=['--slot', '-s'], help='slot to restore the deleted content to') c.argument('restore_content_only', action='store_true', help='restore only deleted files without web app settings') with self.argument_context('webapp traffic-routing') as c: c.argument('distribution', options_list=['--distribution', '-d'], nargs='+', help='space-separated slot routings in a format of `<slot-name>=<percentage>` e.g. staging=50. 
Unused traffic percentage will go to the Production slot') with self.argument_context('webapp update') as c: c.argument('client_affinity_enabled', help="Enables sending session affinity cookies.", arg_type=get_three_state_flag(return_label=True)) c.argument('https_only', help="Redirect all traffic made to an app using HTTP to HTTPS.", arg_type=get_three_state_flag(return_label=True)) c.argument('force_dns_registration', help="If true, web app hostname is force registered with DNS", arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0')) c.argument('skip_custom_domain_verification', help="If true, custom (non *.azurewebsites.net) domains associated with web app are not verified", arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0')) c.argument('ttl_in_seconds', help="Time to live in seconds for web app's default domain name", arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0')) c.argument('skip_dns_registration', help="If true web app hostname is not registered with DNS on creation", arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0')) c.argument('minimum_elastic_instance_count', options_list=["--minimum-elastic-instance-count", "-i"], type=int, is_preview=True, help="Minimum number of instances. App must be in an elastic scale App Service Plan.") c.argument('prewarmed_instance_count', options_list=["--prewarmed-instance-count", "-w"], type=int, is_preview=True, help="Number of preWarmed instances. 
App must be in an elastic scale App Service Plan.") with self.argument_context('webapp browse') as c: c.argument('logs', options_list=['--logs', '-l'], action='store_true', help='Enable viewing the log stream immediately after launching the web app') with self.argument_context('webapp delete') as c: c.argument('name', arg_type=webapp_name_arg_type, local_context_attribute=None) c.argument('keep_empty_plan', action='store_true', help='keep empty app service plan') c.argument('keep_metrics', action='store_true', help='keep app metrics') c.argument('keep_dns_registration', action='store_true', help='keep DNS registration', deprecate_info=c.deprecate(expiration='3.0.0')) with self.argument_context('webapp webjob') as c: c.argument('webjob_name', help='The name of the webjob', options_list=['--webjob-name', '-w']) with self.argument_context('webapp webjob continuous list') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) with self.argument_context('webapp webjob triggered list') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) for scope in ['webapp', 'functionapp', 'logicapp']: with self.argument_context(scope + ' create') as c: c.argument('deployment_container_image_name', options_list=['--deployment-container-image-name', '-i'], help='Container image name from Docker Hub, e.g. 
publisher/image-name:tag') c.argument('deployment_local_git', action='store_true', options_list=['--deployment-local-git', '-l'], help='enable local git') c.argument('deployment_zip', options_list=['--deployment-zip', '-z'], help='perform deployment using zip file') c.argument('deployment_source_url', options_list=['--deployment-source-url', '-u'], help='Git repository URL to link with manual integration') c.argument('deployment_source_branch', options_list=['--deployment-source-branch', '-b'], help='the branch to deploy') c.argument('tags', arg_type=tags_type) for scope in ['webapp', 'functionapp']: with self.argument_context(scope) as c: c.argument('assign_identities', nargs='*', options_list=['--assign-identity'], help='accept system or user assigned identities separated by spaces. Use \'[system]\' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples') c.argument('scope', options_list=['--scope'], help="Scope that the system assigned identity can access") c.argument('role', options_list=['--role'], help="Role name or id the system assigned identity will have") with self.argument_context(scope + ' config ssl bind') as c: c.argument('ssl_type', help='The ssl cert type', arg_type=get_enum_type(['SNI', 'IP'])) with self.argument_context(scope + ' config ssl upload') as c: c.argument('certificate_password', help='The ssl cert password') c.argument('certificate_file', type=file_type, help='The filepath for the .pfx file') c.argument('slot', options_list=['--slot', '-s'], help='The name of the slot. 
Default to the productions slot if not specified') with self.argument_context(scope + ' config ssl') as c: c.argument('certificate_thumbprint', help='The ssl cert thumbprint') with self.argument_context(scope + ' config appsettings') as c: c.argument('settings', nargs='+', help="space-separated app settings in a format of `<name>=<value>`") c.argument('setting_names', nargs='+', help="space-separated app setting names") with self.argument_context(scope + ' config ssl import') as c: c.argument('key_vault', help='The name or resource ID of the Key Vault') c.argument('key_vault_certificate_name', help='The name of the certificate in Key Vault') with self.argument_context(scope + ' config ssl create') as c: c.argument('hostname', help='The custom domain name') c.argument('name', options_list=['--name', '-n'], help='Name of the web app.') c.argument('resource-group', options_list=['--resource-group', '-g'], help='Name of resource group.') with self.argument_context(scope + ' config ssl show') as c: c.argument('certificate_name', help='The name of the certificate') with self.argument_context(scope + ' config hostname') as c: c.argument('hostname', completer=get_hostname_completion_list, help="hostname assigned to the site, such as custom domains", id_part='child_name_1') with self.argument_context(scope + ' deployment user') as c: c.argument('user_name', help='user name') c.argument('password', help='password, will prompt if not specified') with self.argument_context(scope + ' deployment source') as c: c.argument('manual_integration', action='store_true', help='disable automatic sync between source control and web') c.argument('repo_url', options_list=['--repo-url', '-u'], help='repository url to pull the latest source from, e.g. 
https://github.com/foo/foo-web') c.argument('branch', help='the branch name of the repository') c.argument('repository_type', help='repository type', arg_type=get_enum_type(['git', 'mercurial', 'github', 'externalgit', 'localgit'])) c.argument('git_token', help='Git access token required for auto sync') c.argument('github_action', options_list=['--github-action'], help='If using github action, default to False') with self.argument_context(scope + ' identity') as c: c.argument('scope', help="The scope the managed identity has access to") c.argument('role', help="Role name or id the managed identity will be assigned") with self.argument_context(scope + ' identity assign') as c: c.argument('assign_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) with self.argument_context(scope + ' identity remove') as c: c.argument('remove_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. 
Default: '{0}'".format(MSI_LOCAL_ID)) with self.argument_context(scope + ' deployment source config-zip') as c: c.argument('src', help='a zip file path for deployment') c.argument('build_remote', help='enable remote build during deployment', arg_type=get_three_state_flag(return_label=True)) c.argument('timeout', type=int, options_list=['--timeout', '-t'], help='Configurable timeout in seconds for checking the status of deployment', validator=validate_timeout_value) with self.argument_context(scope + ' config appsettings list') as c: c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type), id_part=None) with self.argument_context(scope + ' config hostname list') as c: c.argument('webapp_name', arg_type=webapp_name_arg_type, id_part=None, options_list='--webapp-name') with self.argument_context(scope + ' cors') as c: c.argument('allowed_origins', options_list=['--allowed-origins', '-a'], nargs='*', help='space separated origins that should be allowed to make cross-origin calls (for example: http://example.com:12345). To allow all, use "*" and remove all other origins from the list') with self.argument_context(scope + ' config set') as c: c.argument('number_of_workers', help='The number of workers to be allocated.', type=int) c.argument('remote_debugging_enabled', help='enable or disable remote debugging', arg_type=get_three_state_flag(return_label=True)) c.argument('web_sockets_enabled', help='enable or disable web sockets', arg_type=get_three_state_flag(return_label=True)) c.argument('always_on', help='ensure web app gets loaded all the time, rather unloaded after been idle. 
Recommended when you have continuous web jobs running', arg_type=get_three_state_flag(return_label=True)) c.argument('auto_heal_enabled', help='enable or disable auto heal', arg_type=get_three_state_flag(return_label=True)) c.argument('use32_bit_worker_process', options_list=['--use-32bit-worker-process'], help='use 32 bits worker process or not', arg_type=get_three_state_flag(return_label=True)) c.argument('php_version', help='The version used to run your web app if using PHP, e.g., 5.5, 5.6, 7.0') c.argument('python_version', help='The version used to run your web app if using Python, e.g., 2.7, 3.4') c.argument('net_framework_version', help="The version used to run your web app if using .NET Framework, e.g., 'v4.0' for .NET 4.6 and 'v3.0' for .NET 3.5") c.argument('linux_fx_version', help="The runtime stack used for your linux-based webapp, e.g., \"RUBY|2.5.5\", \"NODE|10.14\", \"PHP|7.2\", \"DOTNETCORE|2.1\". See https://aka.ms/linux-stacks for more info.") c.argument('windows_fx_version', help="A docker image name used for your windows container web app, e.g., microsoft/nanoserver:ltsc2016") if scope == 'functionapp': c.ignore('windows_fx_version') c.argument('pre_warmed_instance_count', options_list=['--prewarmed-instance-count'], help="Number of pre-warmed instances a function app has") if scope == 'webapp': c.ignore('reserved_instance_count') c.argument('java_version', help="The version used to run your web app if using Java, e.g., '1.7' for Java 7, '1.8' for Java 8") c.argument('java_container', help="The java container, e.g., Tomcat, Jetty") c.argument('java_container_version', help="The version of the java container, e.g., '8.0.23' for Tomcat") c.argument('min_tls_version', help="The minimum version of TLS required for SSL requests, e.g., '1.0', '1.1', '1.2'") c.argument('http20_enabled', help="configures a web site to allow clients to connect over http2.0.", arg_type=get_three_state_flag(return_label=True)) c.argument('app_command_line', 
options_list=['--startup-file'], help="The startup file for linux hosted web apps, e.g. 'process.json' for Node.js web") c.argument('ftps_state', help="Set the Ftps state value for an app. Default value is 'AllAllowed'.", arg_type=get_enum_type(FTPS_STATE_TYPES)) c.argument('vnet_route_all_enabled', help="Configure regional VNet integration to route all traffic to the VNet.", arg_type=get_three_state_flag(return_label=True)) c.argument('generic_configurations', nargs='+', help='Provide site configuration list in a format of either `key=value` pair or `@<json_file>`. PowerShell and Windows Command Prompt users should use a JSON file to provide these configurations to avoid compatibility issues with escape characters.') with self.argument_context(scope + ' config container') as c: c.argument('docker_registry_server_url', options_list=['--docker-registry-server-url', '-r'], help='the container registry server url') c.argument('docker_custom_image_name', options_list=['--docker-custom-image-name', '-c', '-i'], help='the container custom image name and optionally the tag name') c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-u'], help='the container registry server username') c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-p'], help='the container registry server password') c.argument('websites_enable_app_service_storage', options_list=['--enable-app-service-storage', '-t'], help='enables platform storage (custom container only)', arg_type=get_three_state_flag(return_label=True)) c.argument('multicontainer_config_type', options_list=['--multicontainer-config-type'], help='config type', arg_type=get_enum_type(MULTI_CONTAINER_TYPES)) c.argument('multicontainer_config_file', options_list=['--multicontainer-config-file'], help="config file for multicontainer apps") c.argument('show_multicontainer_config', action='store_true', help='shows decoded config if a multicontainer config is 
set') with self.argument_context(scope + ' deployment container config') as c: c.argument('enable', options_list=['--enable-cd', '-e'], help='enable/disable continuous deployment', arg_type=get_three_state_flag(return_label=True)) with self.argument_context('webapp config connection-string list') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) with self.argument_context('webapp config storage-account list') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) with self.argument_context('webapp config hostname') as c: c.argument('webapp_name', help="webapp name. You can configure the default using `az configure --defaults web=<name>`", configured_default='web', completer=get_resource_name_completion_list('Microsoft.Web/sites'), id_part='name', local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET])) with self.argument_context('webapp deployment list-publishing-profiles') as c: c.argument('xml', options_list=['--xml'], required=False, help='retrieves the publishing profile details in XML format') with self.argument_context('webapp deployment slot') as c: c.argument('slot', help='the name of the slot') c.argument('webapp', arg_type=name_arg_type, completer=get_resource_name_completion_list('Microsoft.Web/sites'), help='Name of the webapp', id_part='name', local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET])) c.argument('auto_swap_slot', help='target slot to auto swap', default='production') c.argument('disable', help='disable auto swap', action='store_true') c.argument('target_slot', help="target slot to swap, default to 'production'") c.argument('preserve_vnet', help="preserve Virtual Network to the slot during swap, default to 'true'", arg_type=get_three_state_flag(return_label=True)) with self.argument_context('webapp deployment slot create') as c: c.argument('configuration_source', help="source slot to clone configurations from. 
Use web app's name to refer to the production slot") with self.argument_context('webapp deployment slot swap') as c: c.argument('action', help="swap types. use 'preview' to apply target slot's settings on the source slot first; use 'swap' to complete it; use 'reset' to reset the swap", arg_type=get_enum_type(['swap', 'preview', 'reset'])) with self.argument_context('webapp deployment github-actions')as c: c.argument('name', arg_type=webapp_name_arg_type) c.argument('resource_group', arg_type=resource_group_name_type, options_list=['--resource-group', '-g']) c.argument('repo', help='The GitHub repository to which the workflow file will be added. In the format: <owner>/<repository-name>') c.argument('token', help='A Personal Access Token with write access to the specified repository. For more information: https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line') c.argument('slot', options_list=['--slot', '-s'], help='The name of the slot. Default to the production slot if not specified.') c.argument('branch', options_list=['--branch', '-b'], help='The branch to which the workflow file will be added. Defaults to "master" if not specified.') c.argument('login_with_github', help='Interactively log in with Github to retrieve the Personal Access Token', action='store_true') with self.argument_context('webapp deployment github-actions add')as c: c.argument('runtime', options_list=['--runtime', '-r'], help='Canonicalized web runtime in the format of Framework|Version, e.g. "PHP|5.6". 
Use "az webapp list-runtimes" for available list.') c.argument('force', options_list=['--force', '-f'], help='When true, the command will overwrite any workflow file with a conflicting name.', action='store_true') with self.argument_context('webapp log config') as c: c.argument('application_logging', help='configure application logging', arg_type=get_enum_type(['filesystem', 'azureblobstorage', 'off'])) c.argument('detailed_error_messages', help='configure detailed error messages', arg_type=get_three_state_flag(return_label=True)) c.argument('failed_request_tracing', help='configure failed request tracing', arg_type=get_three_state_flag(return_label=True)) c.argument('level', help='logging level', arg_type=get_enum_type(['error', 'warning', 'information', 'verbose'])) c.argument('web_server_logging', help='configure Web server logging', arg_type=get_enum_type(['off', 'filesystem'])) c.argument('docker_container_logging', help='configure gathering STDOUT and STDERR output from container', arg_type=get_enum_type(['off', 'filesystem'])) with self.argument_context('webapp log tail') as c: c.argument('provider', help="By default all live traces configured by `az webapp log config` will be shown, but you can scope to certain providers/folders, e.g. 'application', 'http', etc. For details, check out https://github.com/projectkudu/kudu/wiki/Diagnostic-Log-Stream") with self.argument_context('webapp log download') as c: c.argument('log_file', default='webapp_logs.zip', type=file_type, completer=FilesCompleter(), help='the downloaded zipped log file path') with self.argument_context('webapp log deployment show') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) c.argument('resource_group', arg_type=resource_group_name_type) c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified") c.argument('deployment_id', options_list=['--deployment-id'], help='Deployment ID. 
If none specified, returns the deployment logs of the latest deployment.') with self.argument_context('webapp log deployment list') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) c.argument('resource_group', arg_type=resource_group_name_type) c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified") with self.argument_context('functionapp log deployment show') as c: c.argument('name', arg_type=functionapp_name_arg_type, id_part=None) c.argument('resource_group', arg_type=resource_group_name_type) c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified") c.argument('deployment_id', options_list=['--deployment-id'], help='Deployment ID. If none specified, returns the deployment logs of the latest deployment.') with self.argument_context('functionapp log deployment list') as c: c.argument('name', arg_type=functionapp_name_arg_type, id_part=None) c.argument('resource_group', arg_type=resource_group_name_type) c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified") for scope in ['appsettings', 'connection-string']: with self.argument_context('webapp config ' + scope) as c: c.argument('settings', nargs='+', help="space-separated {} in a format of `<name>=<value>`".format(scope)) c.argument('slot_settings', nargs='+', help="space-separated slot {} in a format of either `<name>=<value>` or `@<json_file>`".format( scope)) c.argument('setting_names', nargs='+', help="space-separated {} names".format(scope)) with self.argument_context('webapp config connection-string') as c: c.argument('connection_string_type', options_list=['--connection-string-type', '-t'], help='connection string type', arg_type=get_enum_type(ConnectionStringType)) c.argument('ids', options_list=['--ids'], help="One or more resource IDs (space delimited). 
If provided no other 'Resource Id' arguments should be specified.", required=True) c.argument('resource_group', options_list=['--resource-group', '-g'], help='Name of resource group. You can configure the default group using `az configure --default-group=<name>`. If `--ids` is provided this should NOT be specified.') c.argument('name', options_list=['--name', '-n'], help='Name of the web app. You can configure the default using `az configure --defaults web=<name>`. If `--ids` is provided this should NOT be specified.', local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET])) with self.argument_context('webapp config storage-account') as c: c.argument('custom_id', options_list=['--custom-id', '-i'], help='name of the share configured within the web app') c.argument('storage_type', options_list=['--storage-type', '-t'], help='storage type', arg_type=get_enum_type(AzureStorageType)) c.argument('account_name', options_list=['--account-name', '-a'], help='storage account name') c.argument('share_name', options_list=['--share-name', '--sn'], help='name of the file share as given in the storage account') c.argument('access_key', options_list=['--access-key', '-k'], help='storage account access key') c.argument('mount_path', options_list=['--mount-path', '-m'], help='the path which the web app uses to read-write data ex: /share1 or /share2') c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. 
Default to the productions slot if not specified") with self.argument_context('webapp config storage-account add') as c: c.argument('slot_setting', options_list=['--slot-setting'], help="slot setting") with self.argument_context('webapp config storage-account update') as c: c.argument('slot_setting', options_list=['--slot-setting'], help="slot setting") with self.argument_context('webapp config backup') as c: c.argument('storage_account_url', help='URL with SAS token to the blob storage container', options_list=['--container-url']) c.argument('webapp_name', help='The name of the web app', local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET])) c.argument('db_name', help='Name of the database in the backup', arg_group='Database') c.argument('db_connection_string', help='Connection string for the database in the backup', arg_group='Database') c.argument('db_type', help='Type of database in the backup', arg_group='Database', arg_type=get_enum_type(DatabaseType)) with self.argument_context('webapp config backup create') as c: c.argument('backup_name', help='Name of the backup. If unspecified, the backup will be named with the web app name and a timestamp', local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.SET], scopes=['webapp'])) with self.argument_context('webapp config backup update') as c: c.argument('backup_name', help='Name of the backup. If unspecified, the backup will be named with the web app name and a timestamp', local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.GET])) c.argument('frequency', help='How often to backup. Use a number followed by d or h, e.g. 
5d = 5 days, 2h = 2 hours') c.argument('keep_at_least_one_backup', help='Always keep one backup, regardless of how old it is', options_list=['--retain-one'], arg_type=get_three_state_flag(return_label=True)) c.argument('retention_period_in_days', help='How many days to keep a backup before automatically deleting it. Set to 0 for indefinite retention', options_list=['--retention']) with self.argument_context('webapp config backup restore') as c: c.argument('backup_name', help='Name of the backup to restore', local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.GET])) c.argument('target_name', help='The name to use for the restored web app. If unspecified, will default to the name that was used when the backup was created') c.argument('overwrite', help='Overwrite the source web app, if --target-name is not specified', action='store_true') c.argument('ignore_hostname_conflict', help='Ignores custom hostnames stored in the backup', action='store_true') with self.argument_context('webapp config snapshot') as c: c.argument('name', arg_type=webapp_name_arg_type) c.argument('slot', options_list=['--slot', '-s'], help='The name of the slot.') with self.argument_context('webapp config snapshot list') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) with self.argument_context('webapp config snapshot restore') as c: c.argument('time', help='Timestamp of the snapshot to restore.') c.argument('restore_content_only', help='Restore the web app files without restoring the settings.') c.argument('source_resource_group', help='Name of the resource group to retrieve snapshot from.') c.argument('source_name', help='Name of the web app to retrieve snapshot from.') c.argument('source_slot', help='Name of the web app slot to retrieve snapshot from.') with self.argument_context('webapp auth update') as c: c.argument('enabled', arg_type=get_three_state_flag(return_label=True)) c.argument('token_store_enabled', 
options_list=['--token-store'], arg_type=get_three_state_flag(return_label=True), help='use App Service Token Store') c.argument('action', arg_type=get_enum_type(AUTH_TYPES)) c.argument('runtime_version', help='Runtime version of the Authentication/Authorization feature in use for the current app') c.argument('token_refresh_extension_hours', type=float, help="Hours, must be formattable into a float") c.argument('allowed_external_redirect_urls', nargs='+', help="One or more urls (space-delimited).") c.argument('client_id', options_list=['--aad-client-id'], arg_group='Azure Active Directory', help='Application ID to integrate AAD organization account Sign-in into your web app') c.argument('client_secret', options_list=['--aad-client-secret'], arg_group='Azure Active Directory', help='AAD application secret') c.argument('client_secret_certificate_thumbprint', options_list=['--aad-client-secret-certificate-thumbprint', '--thumbprint'], arg_group='Azure Active Directory', help='Alternative to AAD Client Secret, thumbprint of a certificate used for signing purposes') c.argument('allowed_audiences', nargs='+', options_list=['--aad-allowed-token-audiences'], arg_group='Azure Active Directory', help="One or more token audiences (space-delimited).") c.argument('issuer', options_list=['--aad-token-issuer-url'], help='This url can be found in the JSON output returned from your active directory endpoint using your tenantID. The endpoint can be queried from `az cloud show` at \"endpoints.activeDirectory\". ' 'The tenantID can be found using `az account show`. 
Get the \"issuer\" from the JSON at <active directory endpoint>/<tenantId>/.well-known/openid-configuration.', arg_group='Azure Active Directory') c.argument('facebook_app_id', arg_group='Facebook', help="Application ID to integrate Facebook Sign-in into your web app") c.argument('facebook_app_secret', arg_group='Facebook', help='Facebook Application client secret') c.argument('facebook_oauth_scopes', nargs='+', help="One or more facebook authentication scopes (space-delimited).", arg_group='Facebook') c.argument('twitter_consumer_key', arg_group='Twitter', help='Application ID to integrate Twitter Sign-in into your web app') c.argument('twitter_consumer_secret', arg_group='Twitter', help='Twitter Application client secret') c.argument('google_client_id', arg_group='Google', help='Application ID to integrate Google Sign-in into your web app') c.argument('google_client_secret', arg_group='Google', help='Google Application client secret') c.argument('google_oauth_scopes', nargs='+', help="One or more Google authentication scopes (space-delimited).", arg_group='Google') c.argument('microsoft_account_client_id', arg_group='Microsoft', help="AAD V2 Application ID to integrate Microsoft account Sign-in into your web app") c.argument('microsoft_account_client_secret', arg_group='Microsoft', help='AAD V2 Application client secret') c.argument('microsoft_account_oauth_scopes', nargs='+', help="One or more Microsoft authentification scopes (space-delimited).", arg_group='Microsoft') with self.argument_context('webapp hybrid-connection') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) c.argument('slot', help="the name of the slot. 
Default to the productions slot if not specified") c.argument('namespace', help="Hybrid connection namespace") c.argument('hybrid_connection', help="Hybrid connection name") with self.argument_context('functionapp hybrid-connection') as c: c.argument('name', id_part=None, local_context_attribute=LocalContextAttribute(name='functionapp_name', actions=[ LocalContextAction.GET])) c.argument('slot', help="the name of the slot. Default to the productions slot if not specified") c.argument('namespace', help="Hybrid connection namespace") c.argument('hybrid_connection', help="Hybrid connection name") with self.argument_context('appservice hybrid-connection set-key') as c: c.argument('plan', help="AppService plan", local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET])) c.argument('namespace', help="Hybrid connection namespace") c.argument('hybrid_connection', help="Hybrid connection name") c.argument('key_type', help="Which key (primary or secondary) should be used") with self.argument_context('appservice vnet-integration list') as c: c.argument('plan', help="AppService plan", local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET])) c.argument('resource_group', arg_type=resource_group_name_type) with self.argument_context('webapp up') as c: c.argument('name', arg_type=webapp_name_arg_type, local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET, LocalContextAction.SET], scopes=['webapp', 'cupertino'])) c.argument('plan', options_list=['--plan', '-p'], configured_default='appserviceplan', completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'), help="name of the appserviceplan associated with the webapp", local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET])) c.argument('sku', arg_type=sku_arg_type) c.argument('os_type', options_list=['--os-type'], arg_type=get_enum_type(OS_TYPES), help="Set 
the OS type for the app to be created.") c.argument('runtime', options_list=['--runtime', '-r'], help="canonicalized web runtime in the format of Framework|Version, e.g. \"PHP|7.2\". Allowed delimiters: \"|\" or \":\". " "Use `az webapp list-runtimes` for available list.") c.argument('dryrun', help="show summary of the create and deploy operation instead of executing it", default=False, action='store_true') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('launch_browser', help="Launch the created app using the default browser", default=False, action='store_true', options_list=['--launch-browser', '-b']) c.argument('logs', help="Configure default logging required to enable viewing log stream immediately after launching the webapp", default=False, action='store_true') c.argument('html', help="Ignore app detection and deploy as an html app", default=False, action='store_true') c.argument('app_service_environment', options_list=['--app-service-environment', '-e'], help='name of the (pre-existing) App Service Environment to deploy to. Requires an Isolated V2 sku [I1v2, I2v2, I3v2]') with self.argument_context('webapp ssh') as c: c.argument('port', options_list=['--port', '-p'], help='Port for the remote connection. Default: Random available port', type=int) c.argument('timeout', options_list=['--timeout', '-t'], help='timeout in seconds. Defaults to none', type=int) c.argument('instance', options_list=['--instance', '-i'], help='Webapp instance to connect to. Defaults to none.') with self.argument_context('webapp create-remote-connection') as c: c.argument('port', options_list=['--port', '-p'], help='Port for the remote connection. Default: Random available port', type=int) c.argument('timeout', options_list=['--timeout', '-t'], help='timeout in seconds. Defaults to none', type=int) c.argument('instance', options_list=['--instance', '-i'], help='Webapp instance to connect to. 
Defaults to none.') with self.argument_context('webapp vnet-integration') as c: c.argument('name', arg_type=webapp_name_arg_type, id_part=None) c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.") c.argument('vnet', help="The name or resource ID of the Vnet", local_context_attribute=LocalContextAttribute(name='vnet_name', actions=[LocalContextAction.GET])) c.argument('subnet', help="The name or resource ID of the subnet", local_context_attribute=LocalContextAttribute(name='subnet_name', actions=[LocalContextAction.GET])) c.argument('skip_delegation_check', help="Skip check if you do not have permission or the VNet is in another subscription.", arg_type=get_three_state_flag(return_label=True)) with self.argument_context('webapp deploy') as c: c.argument('name', options_list=['--name', '-n'], help='Name of the webapp to deploy to.') c.argument('src_path', options_list=['--src-path'], help='Path of the artifact to be deployed. Ex: "myapp.zip" or "/myworkspace/apps/myapp.war"') c.argument('src_url', options_list=['--src-url'], help='URL of the artifact. The webapp will pull the artifact from this URL. Ex: "http://mysite.com/files/myapp.war?key=123"') c.argument('target_path', options_list=['--target-path'], help='Absolute path that the artifact should be deployed to. Defaults to "home/site/wwwroot/" Ex: "/home/site/deployments/tools/", "/home/site/scripts/startup-script.sh".') c.argument('artifact_type', options_list=['--type'], help='Used to override the type of artifact being deployed.', choices=['war', 'jar', 'ear', 'lib', 'startup', 'static', 'zip']) c.argument('is_async', options_list=['--async'], help='If true, the artifact is deployed asynchronously. (The command will exit once the artifact is pushed to the web app.)', choices=['true', 'false']) c.argument('restart', options_list=['--restart'], help='If true, the web app will be restarted following the deployment. 
Set this to false if you are deploying multiple artifacts and do not want to restart the site on the earlier deployments.', choices=['true', 'false']) c.argument('clean', options_list=['--clean'], help='If true, cleans the target directory prior to deploying the file(s). Default value is determined based on artifact type.', choices=['true', 'false']) c.argument('ignore_stack', options_list=['--ignore-stack'], help='If true, any stack-specific defaults are ignored.', choices=['true', 'false']) c.argument('timeout', options_list=['--timeout'], help='Timeout for the deployment operation in milliseconds.') c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.") with self.argument_context('functionapp deploy') as c: c.argument('name', options_list=['--name', '-n'], help='Name of the function app to deploy to.') c.argument('src_path', options_list=['--src-path'], help='Path of the artifact to be deployed. Ex: "myapp.zip" or "/myworkspace/apps/myapp.war"') c.argument('src_url', options_list=['--src-url'], help='URL of the artifact. The webapp will pull the artifact from this URL. Ex: "http://mysite.com/files/myapp.war?key=123"') c.argument('target_path', options_list=['--target-path'], help='Absolute path that the artifact should be deployed to. Defaults to "home/site/wwwroot/". Ex: "/home/site/deployments/tools/", "/home/site/scripts/startup-script.sh".') c.argument('artifact_type', options_list=['--type'], help='Used to override the type of artifact being deployed.', choices=['war', 'jar', 'ear', 'lib', 'startup', 'static', 'zip']) c.argument('is_async', options_list=['--async'], help='Asynchronous deployment', choices=['true', 'false']) c.argument('restart', options_list=['--restart'], help='If true, the web app will be restarted following the deployment, default value is true. 
Set this to false if you are deploying multiple artifacts and do not want to restart the site on the earlier deployments.', choices=['true', 'false']) c.argument('clean', options_list=['--clean'], help='If true, cleans the target directory prior to deploying the file(s). Default value is determined based on artifact type.', choices=['true', 'false']) c.argument('ignore_stack', options_list=['--ignore-stack'], help='If true, any stack-specific defaults are ignored.', choices=['true', 'false']) c.argument('timeout', options_list=['--timeout'], help='Timeout for the deployment operation in milliseconds.') c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.") with self.argument_context('functionapp create') as c: c.argument('vnet', options_list=['--vnet'], help="Name or resource ID of the regional virtual network. If there are multiple vnets of the same name across different resource groups, use vnet resource id to specify which vnet to use. If vnet name is used, by default, the vnet in the same resource group as the webapp will be used. Must be used with --subnet argument.") c.argument('subnet', options_list=['--subnet'], help="Name or resource ID of the pre-existing subnet to have the webapp join. The --vnet is argument also needed if specifying subnet by name.") with self.argument_context('functionapp vnet-integration') as c: c.argument('name', arg_type=functionapp_name_arg_type, id_part=None) c.argument('slot', help="The name of the slot. 
Default to the productions slot if not specified") c.argument('vnet', help="The name or resource ID of the Vnet", validator=validate_add_vnet, local_context_attribute=LocalContextAttribute(name='vnet_name', actions=[LocalContextAction.GET])) c.argument('subnet', help="The name or resource ID of the subnet", local_context_attribute=LocalContextAttribute(name='subnet_name', actions=[LocalContextAction.GET])) c.argument('skip_delegation_check', help="Skip check if you do not have permission or the VNet is in another subscription.", arg_type=get_three_state_flag(return_label=True)) for scope in ['functionapp', 'logicapp']: app_type = scope[:-3] # 'function' or 'logic' with self.argument_context(scope) as c: c.ignore('app_instance') c.argument('name', arg_type=name_arg_type_dict[scope], id_part='name', help='name of the {} app'.format(app_type)) c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified") with self.argument_context(scope + ' create') as c: c.argument('plan', options_list=['--plan', '-p'], configured_default='appserviceplan', completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'), help="name or resource id of the {} app service plan. Use 'appservice plan create' to get one. If using an App Service plan from a different resource group, the full resource id must be used and not the plan name.".format(scope), local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET])) c.argument('name', options_list=['--name', '-n'], help='name of the new {} app'.format(app_type), local_context_attribute=LocalContextAttribute(name=scope + '_name', actions=[LocalContextAction.SET], scopes=[scope])) c.argument('storage_account', options_list=['--storage-account', '-s'], help='Provide a string value of a Storage Account in the provided Resource Group. 
Or Resource ID of a Storage Account in a different Resource Group', local_context_attribute=LocalContextAttribute(name='storage_account_name', actions=[LocalContextAction.GET])) c.argument('consumption_plan_location', options_list=['--consumption-plan-location', '-c'], help="Geographic location where {} app will be hosted. Use `az {} list-consumption-locations` to view available locations.".format(app_type, scope)) c.argument('os_type', arg_type=get_enum_type(OS_TYPES), help="Set the OS type for the app to be created.") c.argument('app_insights_key', help="Instrumentation key of App Insights to be added.") c.argument('app_insights', help="Name of the existing App Insights project to be added to the {} app. Must be in the ".format(app_type) + "same resource group.") c.argument('disable_app_insights', arg_type=get_three_state_flag(return_label=True), help="Disable creating application insights resource during {} create. No logs will be available.".format(scope)) c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-d'], help='The container registry server username.') c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'], help='The container registry server password. Required for private registries.') if scope == 'functionapp': c.argument('functions_version', help='The functions app version. NOTE: This will be required starting the next release cycle', arg_type=get_enum_type(FUNCTIONS_VERSIONS)) c.argument('runtime', help='The functions runtime stack.', arg_type=get_enum_type(functionapp_runtime_strings)) c.argument('runtime_version', help='The version of the functions runtime stack. 
' 'Allowed values for each --runtime are: ' + ', '.join(functionapp_runtime_to_version_strings)) with self.argument_context('functionapp config hostname') as c: c.argument('webapp_name', arg_type=functionapp_name_arg_type, id_part='name') # For commands with shared impl between web app and function app and has output, we apply type validation to avoid confusions with self.argument_context('functionapp show') as c: c.argument('name', arg_type=functionapp_name_arg_type) with self.argument_context('functionapp delete') as c: c.argument('name', arg_type=functionapp_name_arg_type, local_context_attribute=None) with self.argument_context('functionapp config appsettings') as c: c.argument('slot_settings', nargs='+', help="space-separated slot app settings in a format of `<name>=<value>`") with self.argument_context('logicapp show') as c: c.argument('name', arg_type=logicapp_name_arg_type) with self.argument_context('logicapp delete') as c: c.argument('name', arg_type=logicapp_name_arg_type, local_context_attribute=None) with self.argument_context('functionapp plan') as c: c.argument('name', arg_type=name_arg_type, help='The name of the app service plan', completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'), configured_default='appserviceplan', id_part='name', local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET])) c.argument('is_linux', arg_type=get_three_state_flag(return_label=True), required=False, help='host function app on Linux worker') c.argument('number_of_workers', options_list=['--number-of-workers', '--min-instances'], help='The number of workers for the app service plan.') c.argument('max_burst', help='The maximum number of elastic workers for the plan.') c.argument('tags', arg_type=tags_type) with self.argument_context('functionapp update') as c: c.argument('plan', required=False, help='The name or resource id of the plan to update the functionapp with.') c.argument('force', required=False, 
help='Required if attempting to migrate functionapp from Premium to Consumption --plan.', action='store_true') with self.argument_context('functionapp plan create') as c: c.argument('name', arg_type=name_arg_type, help='The name of the app service plan', completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'), configured_default='appserviceplan', id_part='name', local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.SET], scopes=['appservice', 'webapp', 'functionapp'])) c.argument('zone_redundant', options_list=['--zone-redundant', '-z'], help='Enable zone redundancy for high availability. Cannot be changed after plan creation. Minimum instance count is 3.') c.argument('sku', required=True, help='The SKU of the app service plan. e.g., F1(Free), D1(Shared), B1(Basic Small), ' 'B2(Basic Medium), B3(Basic Large), S1(Standard Small), ' 'P1V2(Premium V2 Small), PC2 (Premium Container Small), PC3 ' '(Premium Container Medium), PC4 (Premium Container Large), I1 ' '(Isolated Small), I2 (Isolated Medium), I3 (Isolated Large), K1 ' '(Kubernetes).') with self.argument_context('functionapp plan update') as c: c.argument('sku', required=False, help='The SKU of the app service plan.') with self.argument_context('functionapp plan delete') as c: c.argument('name', arg_type=name_arg_type, help='The name of the app service plan', completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'), configured_default='appserviceplan', id_part='name', local_context_attribute=None) with self.argument_context('functionapp deployment list-publishing-profiles') as c: c.argument('xml', options_list=['--xml'], required=False, help='retrieves the publishing profile details in XML format') with self.argument_context('functionapp deployment slot') as c: c.argument('slot', help='the name of the slot') # This is set to webapp to simply reuse webapp functions, without rewriting same functions for function apps. 
# The help will still show "-n or --name", so it should not be a problem to do it this way c.argument('webapp', arg_type=functionapp_name_arg_type, completer=get_resource_name_completion_list('Microsoft.Web/sites'), help='Name of the function app', id_part='name') c.argument('auto_swap_slot', help='target slot to auto swap', default='production') c.argument('disable', help='disable auto swap', action='store_true') c.argument('target_slot', help="target slot to swap, default to 'production'") c.argument('preserve_vnet', help="preserve Virtual Network to the slot during swap, default to 'true'", arg_type=get_three_state_flag(return_label=True)) with self.argument_context('functionapp deployment slot create') as c: c.argument('configuration_source', help="source slot to clone configurations from. Use function app's name to refer to the production slot") with self.argument_context('functionapp deployment slot swap') as c: c.argument('action', help="swap types. use 'preview' to apply target slot's settings on the source slot first; use 'swap' to complete it; use 'reset' to reset the swap", arg_type=get_enum_type(['swap', 'preview', 'reset'])) with self.argument_context('functionapp keys', id_part=None) as c: c.argument('resource_group_name', arg_type=resource_group_name_type,) c.argument('name', arg_type=functionapp_name_arg_type, completer=get_resource_name_completion_list('Microsoft.Web/sites'), help='Name of the function app') c.argument('slot', options_list=['--slot', '-s'], help="The name of the slot. Defaults to the productions slot if not specified") with self.argument_context('functionapp keys set', id_part=None) as c: c.argument('key_name', help="Name of the key to set.") c.argument('key_value', help="Value of the new key. 
If not provided, a value will be generated.") c.argument('key_type', help="Type of key.", arg_type=get_enum_type(['systemKey', 'functionKeys', 'masterKey'])) with self.argument_context('functionapp keys delete', id_part=None) as c: c.argument('key_name', help="Name of the key to set.") c.argument('key_type', help="Type of key.", arg_type=get_enum_type(['systemKey', 'functionKeys', 'masterKey'])) with self.argument_context('functionapp function', id_part=None) as c: c.argument('resource_group_name', arg_type=resource_group_name_type,) c.argument('name', arg_type=functionapp_name_arg_type, completer=get_resource_name_completion_list('Microsoft.Web/sites'), help='Name of the function app') c.argument('function_name', help="Name of the Function") with self.argument_context('functionapp function keys', id_part=None) as c: c.argument('slot', options_list=['--slot', '-s'], help="The name of the slot. Defaults to the productions slot if not specified") with self.argument_context('functionapp function keys set', id_part=None) as c: c.argument('key_name', help="Name of the key to set.") c.argument('key_value', help="Value of the new key. 
If not provided, a value will be generated.") with self.argument_context('functionapp function keys delete', id_part=None) as c: c.argument('key_name', help="Name of the key to set.") # Access Restriction Commands for scope in ['webapp', 'functionapp']: with self.argument_context(scope + ' config access-restriction show') as c: c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type)) with self.argument_context(scope + ' config access-restriction add') as c: c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type)) c.argument('rule_name', options_list=['--rule-name', '-r'], help='Name of the access restriction rule to add') c.argument('priority', options_list=['--priority', '-p'], help="Priority of the access restriction rule") c.argument('description', help='Description of the access restriction rule') c.argument('action', arg_type=get_enum_type(ACCESS_RESTRICTION_ACTION_TYPES), help="Allow or deny access") c.argument('ip_address', help="IP address or CIDR range (optional comma separated list of up to 8 ranges)", validator=validate_ip_address) c.argument('service_tag', help="Service Tag (optional comma separated list of up to 8 tags)", validator=validate_service_tag) c.argument('vnet_name', help="vNet name") c.argument('subnet', help="Subnet name (requires vNet name) or subnet resource id") c.argument('ignore_missing_vnet_service_endpoint', options_list=['--ignore-missing-endpoint', '-i'], help='Create access restriction rule with checking if the subnet has Microsoft.Web service endpoint enabled', arg_type=get_three_state_flag(), default=False) c.argument('scm_site', help='True if access restrictions is added for scm site', arg_type=get_three_state_flag()) c.argument('vnet_resource_group', help='Resource group of virtual network (default is web app resource group)') c.argument('http_headers', nargs='+', help="space-separated http headers in a format of `<name>=<value>`") 
with self.argument_context(scope + ' config access-restriction remove') as c: c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type)) c.argument('rule_name', options_list=['--rule-name', '-r'], help='Name of the access restriction to remove') c.argument('ip_address', help="IP address or CIDR range (optional comma separated list of up to 8 ranges)", validator=validate_ip_address) c.argument('service_tag', help="Service Tag (optional comma separated list of up to 8 tags)", validator=validate_service_tag) c.argument('vnet_name', help="vNet name") c.argument('subnet', help="Subnet name (requires vNet name) or subnet resource id") c.argument('scm_site', help='True if access restriction should be removed from scm site', arg_type=get_three_state_flag()) c.argument('action', arg_type=get_enum_type(ACCESS_RESTRICTION_ACTION_TYPES), help="Allow or deny access") with self.argument_context(scope + ' config access-restriction set') as c: c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type)) c.argument('use_same_restrictions_for_scm_site', help="Use same access restrictions for scm site", arg_type=get_three_state_flag()) # App Service Environment Commands with self.argument_context('appservice ase show') as c: c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment', local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET])) with self.argument_context('appservice ase create') as c: c.argument('name', options_list=['--name', '-n'], validator=validate_ase_create, help='Name of the app service environment', local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.SET], scopes=['appservice'])) c.argument('kind', options_list=['--kind', '-k'], arg_type=get_enum_type(ASE_KINDS), default='ASEv2', help="Specify App Service Environment version") c.argument('subnet', help='Name or ID 
of existing subnet. To create vnet and/or subnet \ use `az network vnet [subnet] create`') c.argument('vnet_name', help='Name of the vNet. Mandatory if only subnet name is specified.') c.argument('virtual_ip_type', arg_type=get_enum_type(ASE_LOADBALANCER_MODES), help="Specify if app service environment should be accessible from internet") c.argument('ignore_subnet_size_validation', arg_type=get_three_state_flag(), help='Do not check if subnet is sized according to recommendations.') c.argument('ignore_route_table', arg_type=get_three_state_flag(), help='Configure route table manually. Applies to ASEv2 only.') c.argument('ignore_network_security_group', arg_type=get_three_state_flag(), help='Configure network security group manually. Applies to ASEv2 only.') c.argument('force_route_table', arg_type=get_three_state_flag(), help='Override route table for subnet. Applies to ASEv2 only.') c.argument('force_network_security_group', arg_type=get_three_state_flag(), help='Override network security group for subnet. Applies to ASEv2 only.') c.argument('front_end_scale_factor', type=int, validator=validate_front_end_scale_factor, help='Scale of front ends to app service plan instance ratio. Applies to ASEv2 only.', default=15) c.argument('front_end_sku', arg_type=isolated_sku_arg_type, default='I1', help='Size of front end servers. Applies to ASEv2 only.') c.argument('os_preference', arg_type=get_enum_type(ASE_OS_PREFERENCE_TYPES), help='Determine if app service environment should start with Linux workers. Applies to ASEv2 only.') c.argument('zone_redundant', arg_type=get_three_state_flag(), help='Configure App Service Environment as Zone Redundant. 
Applies to ASEv3 only.') with self.argument_context('appservice ase delete') as c: c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment') with self.argument_context('appservice ase update') as c: c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment', local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET])) c.argument('front_end_scale_factor', type=int, validator=validate_front_end_scale_factor, help='(ASEv2 only) Scale of front ends to app service plan instance ratio between 5 and 15.') c.argument('front_end_sku', arg_type=isolated_sku_arg_type, help='(ASEv2 only) Size of front end servers.') c.argument('allow_new_private_endpoint_connections', arg_type=get_three_state_flag(), options_list=['--allow-new-private-endpoint-connections', '-p'], help='(ASEv3 only) Configure Apps in App Service Environment to allow new private endpoint connections.') with self.argument_context('appservice ase list-addresses') as c: c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment', local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET])) with self.argument_context('appservice ase list-plans') as c: c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment', local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET])) with self.argument_context('appservice ase create-inbound-services') as c: c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment', local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET])) c.argument('subnet', help='Name or ID of existing subnet for inbound traffic to ASEv3. \ To create vnet and/or subnet use `az network vnet [subnet] create`') c.argument('vnet_name', help='Name of the vNet. 
Mandatory if only subnet name is specified.') c.argument('skip_dns', arg_type=get_three_state_flag(), help='Do not create Private DNS Zone and DNS records.') # App Service Domain Commands with self.argument_context('appservice domain create') as c: c.argument('hostname', options_list=['--hostname', '-n'], help='Name of the custom domain') c.argument('contact_info', options_list=['--contact-info', '-c'], help='The file path to a JSON object with your contact info for domain registration. ' 'Please see the following link for the format of the JSON file expected: ' 'https://github.com/AzureAppServiceCLI/appservice_domains_templates/blob/master/contact_info.json') c.argument('privacy', options_list=['--privacy', '-p'], help='Enable privacy protection') c.argument('auto_renew', options_list=['--auto-renew', '-a'], help='Enable auto-renew on the domain') c.argument('accept_terms', options_list=['--accept-terms'], help='By using this flag, you are accepting ' 'the conditions shown using the --show-hostname-purchase-terms flag. ') c.argument('tags', arg_type=tags_type) c.argument('dryrun', help='Show summary of the purchase and create operation instead of executing it') c.argument('no_wait', help='Do not wait for the create to complete, and return immediately after queuing the create.') c.argument('validate', help='Generate and validate the ARM template without creating any resources') with self.argument_context('appservice domain show-terms') as c: c.argument('hostname', options_list=['--hostname', '-n'], help='Name of the custom domain') with self.argument_context('staticwebapp', validator=validate_public_cloud) as c: c.argument('name', options_list=['--name', '-n'], metavar='NAME', help="Name of the static site") c.argument('source', options_list=['--source', '-s'], help="URL for the repository of the static site.") c.argument('token', options_list=['--token', '-t'], help="A user's github repository token. 
This is used to setup the Github Actions workflow file and " "API secrets. If you need to create a Github Personal Access Token, " "please run with the '--login-with-github' flag or follow the steps found at the following link:\n" "https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line") c.argument('login_with_github', help="Interactively log in with Github to retrieve the Personal Access Token") c.argument('branch', options_list=['--branch', '-b'], help="The target branch in the repository.") with self.argument_context('staticwebapp environment') as c: c.argument('environment_name', options_list=['--environment-name'], help="Name of the environment of static site") with self.argument_context('staticwebapp hostname') as c: c.argument('hostname', options_list=['--hostname'], help="custom hostname such as www.example.com. Only support sub domain in preview.") with self.argument_context('staticwebapp hostname set') as c: c.argument('validation_method', options_list=['--validation-method', '-m'], help="Validation method for the custom domain.", arg_type=get_enum_type(["cname-delegation", "dns-txt-token"])) with self.argument_context('staticwebapp appsettings') as c: c.argument('setting_pairs', options_list=['--setting-names'], help="Space-separated app settings in 'key=value' format. ", nargs='*') c.argument('setting_names', options_list=['--setting-names'], help="Space-separated app setting names.", nargs='*') with self.argument_context('staticwebapp users') as c: c.argument('authentication_provider', options_list=['--authentication-provider'], help="Authentication provider of the user identity such as AAD, Facebook, GitHub, Google, Twitter.") c.argument('user_details', options_list=['--user-details'], help="Email for AAD, Facebook, and Google. 
Account name (handle) for GitHub and Twitter.") c.argument('user_id', help="Given id of registered user.") c.argument('domain', options_list=['--domain'], help="A domain added to the static app in quotes.") c.argument('roles', options_list=['--roles'], help="Comma-separated default or user-defined role names. " "Roles that can be assigned to a user are comma separated and case-insensitive (at most 50 " "roles up to 25 characters each and restricted to 0-9,A-Z,a-z, and _). " "Define roles in routes.json during root directory of your GitHub repo.") c.argument('invitation_expiration_in_hours', options_list=['--invitation-expiration-in-hours'], help="This value sets when the link will expire in hours. The maximum is 168 (7 days).") with self.argument_context('staticwebapp identity') as c: c.argument('scope', help="The scope the managed identity has access to") c.argument('role', help="Role name or id the managed identity will be assigned") with self.argument_context('staticwebapp identity assign') as c: c.argument('assign_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) with self.argument_context('staticwebapp identity remove') as c: c.argument('remove_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) with self.argument_context('staticwebapp create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('tags', arg_type=tags_type) c.argument('sku', arg_type=static_web_app_sku_arg_type) c.argument('app_location', options_list=['--app-location'], help="Location of your application code. 
For example, '/' represents the root of your app, " "while '/app' represents a directory called 'app'") c.argument('api_location', options_list=['--api-location'], help="Location of your Azure Functions code. For example, '/api' represents a folder called 'api'.") c.argument('app_artifact_location', options_list=['--app-artifact-location'], help="The path of your build output relative to your apps location. For example, setting a value " "of 'build' when your app location is set to '/app' will cause the content at '/app/build' to " "be served.", deprecate_info=c.deprecate(expiration='2.22.1')) c.argument('output_location', options_list=['--output-location'], help="The path of your build output relative to your apps location. For example, setting a value " "of 'build' when your app location is set to '/app' will cause the content at '/app/build' to " "be served.") with self.argument_context('staticwebapp update') as c: c.argument('tags', arg_type=tags_type) c.argument('sku', arg_type=static_web_app_sku_arg_type) with self.argument_context('staticwebapp functions link') as c: c.argument('function_resource_id', help="Resource ID of the functionapp to link. Can be retrieved with 'az functionapp --query id'") c.argument('force', help="Force the function link even if the function is already linked to a static webapp. 
May be needed if the function was previously linked to a static webapp.") def _get_functionapp_runtime_versions(): # set up functionapp create help menu KEYS = FUNCTIONS_STACKS_API_KEYS() stacks_api_json_list = [] stacks_api_json_list.append(get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])) stacks_api_json_list.append(get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])) # build a map of runtime -> runtime version -> runtime version properties runtime_to_version = {} for stacks_api_json in stacks_api_json_list: for runtime_json in stacks_api_json[KEYS.VALUE]: runtime_name = runtime_json[KEYS.NAME] for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]: runtime_version = runtime_version_json[KEYS.DISPLAY_VERSION] runtime_version_properties = { KEYS.IS_HIDDEN: runtime_version_json[KEYS.IS_HIDDEN], KEYS.IS_DEPRECATED: runtime_version_json[KEYS.IS_DEPRECATED], KEYS.IS_PREVIEW: runtime_version_json[KEYS.IS_PREVIEW], } runtime_to_version[runtime_name] = runtime_to_version.get(runtime_name, dict()) runtime_to_version[runtime_name][runtime_version] = runtime_version_properties # traverse the map to build an ordered string of runtimes -> runtime versions, # taking their properties into account (i.e. 
isHidden, isPreview) runtime_to_version_strings = [] for runtime, runtime_versions in runtime_to_version.items(): # dotnet and custom version is not configurable, so leave out of help menu if runtime in ('dotnet', 'custom'): continue ordered_runtime_versions = list(runtime_versions.keys()) ordered_runtime_versions.sort(key=float) ordered_runtime_versions_strings = [] for version in ordered_runtime_versions: if runtime_versions[version][KEYS.IS_HIDDEN] or runtime_versions[version][KEYS.IS_DEPRECATED]: continue if runtime_versions[version][KEYS.IS_PREVIEW]: ordered_runtime_versions_strings.append(version + ' (preview)') else: ordered_runtime_versions_strings.append(version) runtime_to_version_strings.append(runtime + ' -> [' + ', '.join(ordered_runtime_versions_strings) + ']') return runtime_to_version.keys(), runtime_to_version_strings
yugangw-msft/azure-cli
src/azure-cli/azure/cli/command_modules/appservice/_params.py
Python
mit
90,609
[ "ASE" ]
4711c6cc466c3d5f867754314d05d3b495e97bae1964a525135a5fe7ebc3998a
# -*- coding: utf-8 -*- """ ========================================== Reconstructing a Coulomb occupation ladder ========================================== Filling of the 2 degenerate orbitals of an atom in function of the chemical potential """ from scipy.special import binom from scipy.optimize import fsolve from numpy import linspace, exp, sum, zeros from matplotlib.pyplot import plot, xlabel, ylabel, title, legend, tight_layout #exact case def spectrum(mu, orbitals, U, beta, Q): return binom(2*orbitals, Q) * exp(-beta *(U/2.*(Q - orbitals)**2 - mu*Q)) def expected_filling(mu, orbitals, U, beta): Z = sum([spectrum(mu, orbitals, U, beta, Q) for Q in range(2*orbitals+1)], axis=0) n_avg = sum([Q*spectrum(mu, orbitals, U, beta, Q) for Q in range(2*orbitals+1)], axis=0) return n_avg / Z def fermi_dist(energy, beta): """ Fermi dirac distribution""" return 1./(exp(beta*energy) +1) def restriction(lam, mu, orbitals, U, beta): """Equation that determines the restriction on lagrange multipier""" return 2*orbitals*fermi_dist(-(mu + lam), beta) - expected_filling(-1*lam, orbitals, U, beta) def main(orbitals, beta, U, step): mu = linspace(-U*orbitals, U*orbitals, step) lam = fsolve(restriction, -mu, (mu, orbitals, U, beta)) plot(mu, expected_filling(mu, orbitals, U, beta), '--', label='Exact') plot(mu, 2*orbitals*fermi_dist(-(mu+lam), beta), label='Slave spin approx') legend(loc=0) title('Orbitals ocupation, $\\beta = {} $, $U= {} $'.format(beta, U), fontsize=14) xlabel('$\mu$', fontsize=20) ylabel('$n$', fontsize=20) tight_layout() return mu, lam def pressision_try(orbitals, U, beta, step): """perform a better initial guess of lambda no improvement""" mu, lam = main(orbitals, U, beta, step) mu2, lam2 = linspace(0, U*orbitals, step), zeros(step) for i in range(99): lam2[i+1] = fsolve(restriction, lam2[i], (mu2[i+1], orbitals, U, beta)) plot(mu2, 2*orbitals*fermi_dist(-(mu2+lam2), beta), label='Test guess') legend(loc=0) if __name__ == "gallery": mu, lam = main(2, 50, 2, 200)
Titan-C/slaveparticles
examples/spins/plot_deg_2orb_fill.py
Python
gpl-3.0
2,126
[ "DIRAC" ]
0e6e9611330d5fdbdf4532c9569880bdd0f3a726998ca58bb762f2a54ce6f5e7
""" """ import os.path import posixpath from collections import deque from glob import glob from logging import getLogger from galaxy.util import in_directory from .util import PathHelper log = getLogger(__name__) TYPES_TO_METHOD = dict( input="inputs_directory", unstructured="unstructured_files_directory", config="configs_directory", tool="tool_files_directory", jobdir="job_directory", workdir="working_directory", metadata="metadata_directory", output="outputs_directory", output_workdir="working_directory", output_metadata="metadata_directory", output_jobdir="job_directory", ) class RemoteJobDirectory: """ Representation of a (potentially) remote Pulsar-style staging directory. """ def __init__(self, remote_staging_directory, remote_id, remote_sep): self.path_helper = PathHelper(remote_sep) if remote_id: self.job_directory = self.path_helper.remote_join( remote_staging_directory, remote_id ) else: self.job_directory = remote_staging_directory def metadata_directory(self): return self._sub_dir('metadata') def working_directory(self): return self._sub_dir('working') def inputs_directory(self): return self._sub_dir('inputs') def outputs_directory(self): return self._sub_dir('outputs') def configs_directory(self): return self._sub_dir('configs') def tool_files_directory(self): return self._sub_dir('tool_files') def unstructured_files_directory(self): return self._sub_dir('unstructured') def default_tmp_directory(self): return self._sub_dir('tmp') @property def path(self): return self.job_directory @property def separator(self): return self.path_helper.separator def calculate_path(self, remote_relative_path, input_type): """ Only for used by Pulsar client, should override for managers to enforce security and make the directory if needed. 
""" directory, allow_nested_files, allow_globs = self._directory_for_file_type(input_type) return self.path_helper.remote_join(directory, remote_relative_path) def _directory_for_file_type(self, file_type): allow_nested_files = False # work_dir and input_extra are types used by legacy clients... # Obviously this client won't be legacy because this is in the # client module, but this code is reused on server which may # serve legacy clients. allow_nested_files = file_type in ['input', 'unstructured', 'output', 'output_workdir', 'metadata', 'output_metadata', 'tool'] allow_globs = file_type in ['output_workdir'] # TODO: tool profile version where this is invalid directory_source = getattr(self, TYPES_TO_METHOD.get(file_type, None), None) if not directory_source: raise Exception("Unknown file_type specified %s" % file_type) if callable(directory_source): directory_source = directory_source() return directory_source, allow_nested_files, allow_globs def _sub_dir(self, name): return self.path_helper.remote_join(self.job_directory, name) def get_mapped_file(directory, remote_path, allow_nested_files=False, local_path_module=os.path, mkdir=True, allow_globs=False): """ >>> import ntpath >>> get_mapped_file(r'C:\\pulsar\\staging\\101', 'dataset_1_files/moo/cow', allow_nested_files=True, local_path_module=ntpath, mkdir=False) 'C:\\\\pulsar\\\\staging\\\\101\\\\dataset_1_files\\\\moo\\\\cow' >>> get_mapped_file(r'C:\\pulsar\\staging\\101', 'dataset_1_files/moo/cow', allow_nested_files=False, local_path_module=ntpath) 'C:\\\\pulsar\\\\staging\\\\101\\\\cow' >>> get_mapped_file(r'C:\\pulsar\\staging\\101', '../cow', allow_nested_files=True, local_path_module=ntpath, mkdir=False) Traceback (most recent call last): Exception: Attempt to read or write file outside an authorized directory. 
""" if not allow_nested_files: name = local_path_module.basename(remote_path) path = local_path_module.join(directory, name) else: local_rel_path = __posix_to_local_path(remote_path, local_path_module=local_path_module) local_path = local_path_module.join(directory, local_rel_path) verify_is_in_directory(local_path, directory, local_path_module=local_path_module) local_directory = local_path_module.dirname(local_path) if mkdir and not local_path_module.exists(local_directory): os.makedirs(local_directory) path = local_path if allow_globs and ('*' in path or '?' in path): matches = glob(path) if len(matches) == 0: raise RuntimeError(f"No files matching glob: {path}") elif len(matches) > 1: log.warning(f"Found multiple files matching {path}, using the first match: {matches}") else: log.info(f"Glob path {path} mapped to matched file: {matches[0]}") path = matches[0] return path def __posix_to_local_path(path, local_path_module=os.path): """ Converts a posix path (coming from Galaxy), to a local path (be it posix or Windows). >>> import ntpath >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=ntpath) 'dataset_1_files\\\\moo\\\\cow' >>> import posixpath >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=posixpath) 'dataset_1_files/moo/cow' """ partial_path = deque() while True: if not path or path == '/': break (path, base) = posixpath.split(path) partial_path.appendleft(base) return local_path_module.join(*partial_path) def verify_is_in_directory(path, directory, local_path_module=os.path): if not in_directory(path, directory, local_path_module): msg = "Attempt to read or write file outside an authorized directory." log.warn("{} Attempted path: {}, valid directory: {}".format(msg, path, directory)) raise Exception(msg)
galaxyproject/pulsar
pulsar/client/job_directory.py
Python
apache-2.0
6,152
[ "Galaxy" ]
b4f6f80092203163f2fa1b71ee0fc28d4bb807d2d79dceb24b93ea85d3106a1b
""" Author: Thomas G. Close (tclose@oist.jp) Copyright: 2012-2014 Thomas G. Close. License: This file is part of the "NineLine" package, which is released under the MIT Licence, see LICENSE for details. """ from __future__ import absolute_import import pyNN.neuron.standardmodels.synapses from pype9.simulate.common.network.synapses import StaticSynapse # , ElectricalSynapse class StaticSynapse(StaticSynapse, pyNN.neuron.standardmodels.synapses.StaticSynapse): PyNNClass = pyNN.neuron.standardmodels.synapses.StaticSynapse # class ElectricalSynapse(ElectricalSynapse, # pyNN.neuron.standardmodels.synapses.ElectricalSynapse): # # PyNNElectricalSynapseClass = ( # pyNN.neuron.standardmodels.synapses.ElectricalSynapse)
tclose/PyPe9
pype9/simulate/neuron/network/synapses.py
Python
mit
803
[ "NEURON" ]
f9dac28a10028e65ac9d7088a2602262e1cff1fc44ba2e63495daa4bf1c44c88
import pylab as pyl import cPickle as pickle galaxies = pickle.load(open('./galaxies.pickle','rb')) galaxies = filter(lambda galaxy: galaxy.ston_I > 30., galaxies) mergers = pyl.asarray([galaxy.Merger for galaxy in galaxies]) icd = pyl.asarray([galaxy.ICD_IH*100 for galaxy in galaxies]) stack = pyl.column_stack((mergers, icd)) result =[] for i in range(10000): # shuffle the icd values pyl.shuffle(stack[:,1]) #get the high ICD ones gt = pyl.where(stack[:,1] > 20) # are they mergers? y = stack[:,0][gt] #how many mergers? m = len(y.nonzero()[0]) #what percentage? per = m/float(len(gt[0])) # save that percentage result.append(per) print len(result)
boada/ICD
sandbox/shuffle.py
Python
mit
712
[ "Galaxy" ]
d0b0482f441989c8a4650d23b995716ed4e64511a23439c60a8d8ca336837449
"""Tests for phonon calculation at specific q-points.""" import numpy as np from phonopy import Phonopy from phonopy.units import VaspToTHz def testQpoints(ph_nacl_nofcsym: Phonopy): """Test phonon calculation at specific q-points by NaCl.""" phonon = ph_nacl_nofcsym qpoints = [[0, 0, 0], [0, 0, 0.5]] phonon.run_qpoints(qpoints, with_dynamical_matrices=True) for i, q in enumerate(qpoints): dm = phonon.qpoints.dynamical_matrices[i] dm_eigs = np.linalg.eigvalsh(dm).real eigs = phonon.qpoints.eigenvalues[i] freqs = phonon.qpoints.frequencies[i] / VaspToTHz np.testing.assert_allclose(dm_eigs, eigs) np.testing.assert_allclose(freqs ** 2 * np.sign(freqs), eigs)
atztogo/phonopy
test/phonon/test_qpoints.py
Python
bsd-3-clause
735
[ "phonopy" ]
3ed3666f60084fdcec7e217789c9b5c0768284d201b358dbbd9e359b7f6fce3c
# -*- coding: utf-8 -*- # Copyright JS Foundation and other contributors, https://js.foundation/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, unicode_literals from .comment_handler import CommentHandler from .error_handler import Error from .jsx_parser import JSXParser from .jsx_syntax import JSXSyntax from .objects import toDict from .parser import Parser from .syntax import Syntax from .tokenizer import Tokenizer from .visitor import NodeVisitor from . import nodes from . 
import jsx_nodes __all__ = ['Syntax', 'JSXSyntax', 'Error', 'NodeVisitor', 'nodes', 'jsx_nodes', 'parse', 'parseModule', 'parseScript', 'tokenize', 'toDict'] def parse(code, options=None, delegate=None, **kwargs): options = {} if options is None else options.copy() options.update(kwargs) # ESNext presset: if options.get('esnext', False): options['jsx'] = True options['classProperties'] = True commentHandler = None def proxyDelegate(node, metadata): if delegate: new_node = delegate(node, metadata) if new_node is not None: node = new_node if commentHandler: commentHandler.visit(node, metadata) return node parserDelegate = None if delegate is None else proxyDelegate collectComment = options.get('comment', False) attachComment = options.get('attachComment', False) if collectComment or attachComment: commentHandler = CommentHandler() commentHandler.attach = attachComment options['comment'] = True parserDelegate = proxyDelegate isModule = options.get('sourceType', 'script') == 'module' if options.get('jsx', False): parser = JSXParser(code, options=options, delegate=parserDelegate) else: parser = Parser(code, options=options, delegate=parserDelegate) ast = parser.parseModule() if isModule else parser.parseScript() if collectComment and commentHandler: ast.comments = commentHandler.comments if parser.config.tokens: ast.tokens = parser.tokens if parser.config.tolerant: ast.errors = parser.errorHandler.errors return ast def parseModule(code, options=None, delegate=None, **kwargs): kwargs['sourceType'] = 'module' return parse(code, options, delegate, **kwargs) def parseScript(code, options=None, delegate=None, **kwargs): kwargs['sourceType'] = 'script' return parse(code, options, delegate, **kwargs) def tokenize(code, options=None, delegate=None, **kwargs): options = {} if options is None else options.copy() options.update(kwargs) tokenizer = Tokenizer(code, options) class Tokens(list): pass tokens = Tokens() try: while True: token = tokenizer.getNextToken() if not token: 
break if delegate: token = delegate(token) tokens.append(token) except Error as e: tokenizer.errorHandler.tolerate(e) if tokenizer.errorHandler.tolerant: tokens.errors = tokenizer.errors() return tokens
mp-coder/translate-dev-tools
esprima/esprima.py
Python
mit
4,382
[ "VisIt" ]
61d11acb422994de7b5b2793189375bb853fa8ac2c4b860ea9f6b33ac9ca583f
""" The list of words used by DPG """ words = [ "and", "ask", "ass", "ape", "ate", "axe", "air", "aim", "ana", "awe", "act", "add", "age", "all", "ant", "bat", "ban", "bar", "bed", "bee", "bet", "bit", "bug", "bob", "bot", "boy", "bud", "but", "cab", "can", "cap", "cat", "car", "cog", "con", "cop", "cot", "cow", "coy", "cub", "cut", "dad", "dam", "dan", "day", "den", "did", "dig", "dip", "doc", "dog", "don", "dot", "dry", "dug", "ear", "eat", "egg", "ego", "elf", "elk", "elm", "end", "eye", "eve", "fad", "fan", "far", "fat", "fax", "fig", "fit", "fix", "fly", "few", "foe", "fog", "for", "fur", "gag", "gap", "gel", "gem", "get", "god", "goo", "got", "gum", "gun", "gut", "guy", "gym", "hot", "how", "has", "had", "ham", "hat", "him", "her", "hit", "hop", "ice", "icy", "ill", "ink", "inn", "ion", "its", "ivy", "jam", "jar", "jaw", "jay", "jet", "jim", "joe", "jog", "jot", "joy", "jug", "keg", "ken", "key", "kid", "kim", "kit", "kin", "lab", "lad", "lap", "law", "lie", "lee", "let", "lip", "lob", "log", "lot", "low", "lug", "mac", "mag", "map", "man", "mat", "max", "meg", "men", "met", "mom", "moo", "mop", "mow", "mud", "mug", "mut", "nab", "nag", "nap", "net", "new", "nip", "nod", "not", "now", "nun", "nut", "oak", "oat", "oar", "off", "oil", "old", "one", "our", "out", "own", "pan", "pal", "pam", "pat", "pea", "pen", "pet", "pig", "pit", "pot", "rag", "ray", "run", "ram", "ran", "rap", "rat", "rig", "rip", "rob", "ron", "rot", "sad", "sag", "sam", "sat", "say", "see", "sex", "set", "she", "shy", "sin", "sir", "sit", "sky", "soy", "sun", "tan", "tap", "tar", "tea", "ted", "too", "the", "tim", "tip", "toe", "tom", "toy", "wag", "was", "wax", "way", "web", "wee", "wet", "why", "wig", "win", "wow", "won", "yak", "yam", "yap", "yen", "yep", "yes", "yet", "yew", "you", "yum", "zag", "zig", "zit", "zap", "zip", "zoo" ]
w8rbt/dpg
python/the_words.py
Python
isc
1,849
[ "Elk" ]
5716d2eca98796cae4b00b0b1bd751e795f194569b0f5d42c778eb8e70f9490e
""" Copyright (C) 2014, Jaguar Land Rover This program is licensed under the terms and conditions of the Mozilla Public License, version 2.0. The full text of the Mozilla Public License is at https://www.mozilla.org/MPL/2.0/ Rudolf Streif (rstreif@jaguarlandrover.com) """ from __future__ import absolute_import import sys, os, logging, time, jsonrpclib, base64 import Queue from urlparse import urlparse from django.conf import settings import devices.models # Logging setup logger = logging.getLogger('rvi') # Globals transaction_id = 0 def send_remote(remote): """ Notify destination, typically a vehicle, of a pending software update. :param retry: sota.models.Retry object """ logger.info('%s: Sending Remote.', remote) global transaction_id # get settings # service edge url try: rvi_service_url = settings.RVI_SERVICE_EDGE_URL except NameError: logger.error('%s: RVI_SERVICE_EDGE_URL not defined. Check settings!', remote) return False # DM service id try: rvi_service_id = settings.RVI_DM_SERVICE_ID except NameError: rvi_service_id = '/dm' # Signature algorithm try: alg = settings.RVI_BACKEND_ALG_SIG except NameError: alg = 'RS256' # Server Key try: keyfile = open(settings.RVI_BACKEND_KEYFILE, 'r') key = keyfile.read() except Exception as e: logger.error('%s: Cannot read server key: %s', remote, e) return False # Create and sign certificate try: cert = remote.encode_jwt(key, alg) except Exception as e: logger.error('%s: Cannot create and sign certificate: %s', remote, e) return False # establish outgoing RVI service edge connection rvi_server = None logger.info('%s: Establishing RVI service edge connection: %s', remote, rvi_service_url) try: rvi_server = jsonrpclib.Server(rvi_service_url) except Exception as e: logger.error('%s: Cannot connect to RVI service edge: %s', remote, e) return False logger.info('%s: Established connection to RVI Service Edge: %s', remote, rvi_server) # get destination info mobile = remote.rem_device dst_url = mobile.get_rvi_id() # notify remote 
of pending file transfer transaction_id += 1 try: rvi_server.message(calling_service = rvi_service_id, service_name = dst_url + rvi_service_id + '/cert_provision', transaction_id = str(transaction_id), timeout = int(time.time()) + 5000, parameters = [{ u'certid': remote.rem_uuid }, { u'certificate': cert }, ]) except Exception as e: logger.error('%s: Cannot connect to RVI service edge: %s', remote, e) return False logger.info('%s: Sent Certificate.', remote) return True
afan1/rvi_backend
web/devices/tasks.py
Python
mpl-2.0
3,005
[ "Jaguar" ]
5aaa0789af53f4eefee0c729e1c14a93310af9022689cebb8be04e15675a0c27
Python 2.7.5 (default, May 15 2013, 22:43:36) [MSC v.1500 32 bit (Intel)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> f=file('hello.txt') >>> lines=f.readlines() >>> for line in lines: ... print (line,) ... ('hello world\n',) ('hello china\n',) ('hello hushupei',) >>> f.close() >>> rf=open('hello.txt') >>> content=rf.read() >>> print(content) hello world hello china hello hushupei >>> rf.close() >>> f=file('hello.txt','w+') >>> li=['hello NGI','hello SDN'] >>> f.writelines(li) >>> f.close() >>> f=file('hello.txt','a+') >>> new_line='\nhello world\nhello china\nhello hushupei' >>> f.write(new_line) >>> f.close() >>> import os >>> rename('./hello.txt','helloworld.txt') Traceback (most recent call last): File "<stdin>", line 1, in <module> NameError: name 'rename' is not defined >>> print stat('./hello.txt') Traceback (most recent call last): File "<stdin>", line 1, in <module> NameError: name 'stat' is not defined >>> print os.stat('./hello.txt') nt.stat_result(st_mode=33206, st_ino=0L, st_dev=0, st_nlink=0, st_uid=0, st_gid=0, st_size=60L, st_atime=1468591074L, st_mtime=1468591989L, st_ctime=1468591074L) >>> os.rename('./hello.txt','helloworld.txt') >>> os.remove('./haha.txt') Traceback (most recent call last): File "<stdin>", line 1, in <module> WindowsError: [Error 2] : './haha.txt' >>> f=file('haha.txt','w+') >>> f.close() >>> os.remove('./haha.txt') >>> os.abspath('./') Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: 'module' object has no attribute 'abspath' >>> os.abspath('.') Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: 'module' object has no attribute 'abspath' >>> import os.path >>> os.path.abspath('.') 'E:\\TestGit' >>> os.path.dirname('.') '' >>> print (os.path.exist('./helloworld.txt')) Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: 'module' object has no attribute 'exist' >>> print 
(os.path.exists('./helloworld.txt')) True >>> print (os.path.getatime('./helloworld.txt')) 1468591074.0 >>> print (os.path.getctime('./helloworld.txt')) 1468591074.0 >>> print (os.path.getmtime('./helloworld.txt')) 1468591989.76 >>> print(os.path.isabs('.')) False >>> print(os.path.isdir('.')) True >>> print(os.path.isfile('.')) False >>> print(os.path.split('./helloworld.txt')) ('.', 'helloworld.txt') >>> print(os.path.splitext('./helloworld.txt')) ('./helloworld', '.txt') >>> print(os.path.splitdrive('./helloworld.txt')) ('', './helloworld.txt') >>> print(os.path.split('E:/TestGit/helloworld.txt')) ('E:/TestGit', 'helloworld.txt') >>> print(os.path.splitdrive('E:/TestGit/helloworld.txt')) ('E:', '/TestGit/helloworld.txt') >>> src=file('hello.txt',"w") >>> li=['hello world\n','hello china\n'] >>> src.writelines(li) >>> src.close() >>> dst=open('hello2.txt','w') >>> src=open('hello.txt','r') >>> dst.write(src.read()) >>> src.close() >>> dst.close() >>> os.remove('hello2.txt') >>> import shutil >>> shutil.copyfile('hello.txt','hello2.txt') >>> shutil.move('hello.txt','../') >>> shutil.move('hello.txt','hello3.txt') >>> help(os.walk) Help on function walk in module os: walk(top, topdown=True, onerror=None, followlinks=False) Directory tree generator. For each directory in the directory tree rooted at top (including top itself, but excluding '.' and '..'), yields a 3-tuple dirpath, dirnames, filenames dirpath is a string, the path to the directory. dirnames is a list of the names of the subdirectories in dirpath (excluding '.' and '..'). filenames is a list of the names of the non-directory files in dirpath. Note that the names in the lists are just names, with no path components. To get a full path (which begins with top) to a file or directory in dirpath, do os.path.join(dirpath, name). If optional arg 'topdown' is true or not specified, the triple for a directory is generated before the triples for any of its subdirectories (directories are generated top down). 
If topdown is false, the triple for a directory is generated after the triples for all of its subdirectories (directories are generated bottom up). When topdown is true, the caller can modify the dirnames list in-place (e.g., via del or slice assignment), and walk will only recurse into the subdirectories whose names remain in dirnames; this can be used to prune the search, or to impose a specific order of visiting. Modifying dirnames when topdown is false is ineffective, since the directories in dirnames have already been generated by the time dirnames itself is generated. By default errors from the os.listdir() call are ignored. If optional arg 'onerror' is specified, it should be a function; it will be called with one argument, an os.error instance. It can report the error to continue with the walk, or raise the exception to abort the walk. Note that the filename is available as the filename attribute of the exception object. By default, os.walk does not follow symbolic links to subdirectories on systems that support them. In order to get this functionality, set the optional argument 'followlinks' to true. Caution: if you pass a relative pathname for top, don't change the current working directory between resumptions of walk. walk never changes the current directory, and assumes that the client doesn't either. Example: import os from os.path import join, getsize for root, dirs, files in os.walk('python/Lib/email'): print root, "consumes", print sum([getsize(join(root, name)) for name in files]), print "bytes in", len(files), "non-directory files" if 'CVS' in dirs: dirs.remove('CVS') # don't visit CVS directories >>>
hushupei/PyCode
b.py
Python
gpl-3.0
5,909
[ "VisIt" ]
c3b8433f1a70732d2fc339379e7685bd4697a3f524432b56df586479c134f2de