input
stringlengths
2.65k
237k
output
stringclasses
1 value
<gh_stars>1-10 ###################### # (c) 2012 <NAME> <<EMAIL>> # License: BSD 3-clause # # Implements structured SVM as described in Joachims et. al. # Cutting-Plane Training of Structural SVMs #def warn(*args, **kwargs): # pass #import warnings #warnings.warn = warn from time import time import numpy as np import cvxopt import cvxopt.solvers #from sklearn.externals.joblib import Parallel, delayed #from joblib import Parallel, delayed from basic_ssvm import BaseSSVM from basic_inference import loss_augmented_inference import multiprocessing import sys import copy class NoConstraint(Exception): # raised if we can not construct a constraint from cache pass class OneSlackSSVM(BaseSSVM): """Structured SVM solver for the 1-slack QP with l1 slack penalty. Implements margin rescaled structural SVM using the 1-slack formulation and cutting plane method, solved using CVXOPT. The optimization is restarted in each iteration. Parameters ---------- model : StructuredModel Object containing the model structure. Has to implement `loss`, `inference` and `loss_augmented_inference`. max_iter : int, default=10000 Maximum number of passes over dataset to find constraints. C : float, default=1 Regularization parameter. check_constraints : bool Whether to check if the new "most violated constraint" is more violated than previous constraints. Helpful for stopping and debugging, but costly. verbose : int Verbosity. negativity_constraint : list of ints Indices of parmeters that are constraint to be negative. This is useful for learning submodular CRFs (inference is formulated as maximization in SSVMs, flipping some signs). break_on_bad : bool default=False Whether to break (start debug mode) when inference was approximate. n_jobs : int, default=1 Number of parallel jobs for inference. -1 means as many as cpus. show_loss_every : int, default=0 Controlls how often the hamming loss is computed (for monitoring purposes). 
Zero means never, otherwise it will be computed very show_loss_every'th epoch. tol : float, default=1e-3 Convergence tolerance. If dual objective decreases less than tol, learning is stopped. The default corresponds to ignoring the behavior of the dual objective and stop only if no more constraints can be found. inference_cache : int, default=0 How many results of loss_augmented_inference to cache per sample. If > 0 the most violating of the cached examples will be used to construct a global constraint. Only if this constraint is not violated, inference will be run again. This parameter poses a memory / computation tradeoff. Storing more constraints might lead to RAM being exhausted. Using inference_cache > 0 is only advisable if computation time is dominated by inference. cache_tol : float, None or 'auto' default='auto' Tolerance when to reject a constraint from cache (and do inference). If None, ``tol`` will be used. Higher values might lead to faster learning. 'auto' uses a heuristic to determine the cache tolerance based on the duality gap, as described in [3]. inactive_threshold : float, default=1e-5 Threshold for dual variable of a constraint to be considered inactive. inactive_window : float, default=50 Window for measuring inactivity. If a constraint is inactive for ``inactive_window`` iterations, it will be pruned from the QP. If set to 0, no constraints will be removed. switch_to : None or string, default=None Switch to the given inference method if the previous method does not find any more constraints. logger : logger object, default=None Pystruct logger for storing the model or extracting additional information. Attributes ---------- w : nd-array, shape=(model.size_joint_feature,) The learned weights of the SVM. old_solution : dict The last solution found by the qp solver. ``loss_curve_`` : list of float List of loss values if show_loss_every > 0. ``objective_curve_`` : list of float Cutting plane objective after each pass through the dataset. 
``primal_objective_curve_`` : list of float Primal objective after each pass through the dataset. ``timestamps_`` : list of int Total training time stored before each iteration. References ---------- [1] <NAME>, and <NAME> and <NAME>: Cutting-plane training of structural SVMs, JMLR 2009 [2] <NAME>: Methods for Learning Structured Prediction in Semantic Segmentation of Natural Images, PhD Thesis. 2014 [3] <NAME> and <NAME>: Learning a Loopy Model For Semantic Segmentation Exactly, VISAPP 2014 """ def __init__(self, model, max_iter=10000, C=1.0, check_constraints=False, verbose=0, negativity_constraint=None, n_jobs=1, break_on_bad=False, show_loss_every=0, tol=1e-3, inference_cache=0, inactive_threshold=1e-5, inactive_window=50, logger=None, cache_tol='auto', switch_to=None, pooling=None, log = None): BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose, n_jobs=n_jobs, show_loss_every=show_loss_every, logger=logger) self.negativity_constraint = negativity_constraint self.check_constraints = check_constraints self.break_on_bad = break_on_bad self.tol = tol self.cache_tol = cache_tol self.inference_cache = inference_cache self.inactive_threshold = inactive_threshold self.inactive_window = inactive_window self.switch_to = switch_to self.pooling=pooling self.log = log def writeLog(self, string, toconsole = False): logfile = open(self.log, 'a') logfile.write(string+"\n") logfile.close() if toconsole is True: print(string) def _solve_1_slack_qp(self, constraints, n_samples): C = np.float(self.C) * n_samples # this is how libsvm/svmstruct do it joint_features = [c[0] for c in constraints] #print("joint_features: {}".format(len(joint_features))) losses = [c[1] for c in constraints] nonzerow=len(self.w) #nonzerow=0 n_constraints = len(joint_features)+nonzerow if nonzerow==0: joint_feature_matrix = np.vstack(joint_features) else: joint_feature_matrix = np.vstack((joint_features,np.identity(nonzerow))) #print("joint feature_matrix: {}".format(joint_feature_matrix.shape)) 
P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T)) # q contains loss from margin-rescaling #print(np.concatenate([-np.array(losses, dtype=np.float),np.zeros(len(self.w))])) #input("Press Enter to continue...") if nonzerow==0: q = cvxopt.matrix(-np.array(losses, dtype=np.float)) else: q = cvxopt.matrix(np.concatenate([-np.array(losses, dtype=np.float),np.zeros(nonzerow)])) # constraints: all alpha must be >zero idy = np.identity(n_constraints) tmp1 = np.zeros(n_constraints) # constraints: all weights must be >= zero #w_idy = np.identity(n_constraints) #w_tmp1 = np.zeros(n_constraints) # positivity constraints: if self.negativity_constraint is None: # empty constraints zero_constr = np.zeros(0) joint_features_constr = np.zeros((0, n_constraints)) else: joint_features_constr = joint_feature_matrix.T[self.negativity_constraint] zero_constr = np.zeros(len(self.negativity_constraint)) # put together G = cvxopt.sparse(cvxopt.matrix(np.vstack((-idy, joint_features_constr)))) h = cvxopt.matrix(np.hstack((tmp1, zero_constr))) # equality constraint: sum of all alpha must be = C a1 =np.ones(len(joint_features)) a2 =np.zeros(nonzerow) #print(a1) #print(a2) #np.concatenate([a1,a2]) if nonzerow==0: A = cvxopt.matrix(np.ones((1,len(joint_features)))) else: A = cvxopt.matrix(np.asmatrix([np.concatenate([a1,a2])])) b = cvxopt.matrix([C]) # solve QP model cvxopt.solvers.options['feastol'] = 1e-5 try: solution = cvxopt.solvers.qp(P, q, G, h, A, b) except ValueError: solution = {'status': 'error'} if solution['status'] != "optimal": print("regularizing QP!") P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T) + 1e-8 * np.eye(joint_feature_matrix.shape[0])) solution = cvxopt.solvers.qp(P, q, G, h, A, b) if solution['status'] != "optimal": raise ValueError("QP solver failed. 
Try regularizing your QP.") # Lagrange multipliers a = np.ravel(solution['x']) self.old_solution = solution self.prune_constraints(constraints, a) # Support vectors have non zero lagrange multipliers sv = a > self.inactive_threshold * C if self.verbose > 1: print("%d support vectors out of %d points" % (np.sum(sv), n_constraints)) self.w = np.dot(a, joint_feature_matrix) #print(self.w) # we needed to flip the sign to make the dual into a minimization # model return -solution['primal objective'] def prune_constraints(self, constraints, a): # append list for new constraint self.alphas.append([]) assert(len(self.alphas) == len(constraints)) for constraint, alpha in zip(self.alphas, a): constraint.append(alpha) constraint = constraint[-self.inactive_window:] # prune unused constraints: # if the max of alpha in last 50 iterations was small, throw away if self.inactive_window != 0: max_active = [np.max(constr[-self.inactive_window:]) for constr in self.alphas] # find strongest constraint that is not ground truth constraint strongest = np.max(max_active[1:]) inactive = np.where(max_active < self.inactive_threshold * strongest)[0] for idx in reversed(inactive): # if we don't reverse, we'll mess the indices up del constraints[idx] del self.alphas[idx] def _check_bad_constraint(self, violation, djoint_feature_mean, loss, old_constraints, break_on_bad, tol=None): violation_difference = violation - self.last_slack_ #print(djoint_feature_mean) if self.verbose > 1: print("New violation: %f difference to last: %f" % (violation, violation_difference)) if violation_difference < 0 and violation > 0 and break_on_bad: raise ValueError("Bad inference: new violation is smaller than" " old.") if tol is None: tol = self.tol if violation_difference < tol: if self.verbose: print("new constraint too weak.") return True equals = [True for djoint_feature_, loss_ in old_constraints if (np.all(djoint_feature_ == djoint_feature_mean) and loss == loss_)] if np.any(equals): print("iam HERE") return 
True if self.check_constraints: for con in old_constraints: # compute violation for old constraint violation_tmp = max(con[1] - np.dot(self.w, con[0]), 0) if self.verbose > 5: print("violation old constraint: %f" % violation_tmp) # if violation of new constraint is smaller or not # significantly larger, don't add constraint. # if smaller, complain about approximate inference. if violation - violation_tmp < -1e-5: if self.verbose: print("bad inference: %f" % (violation_tmp - violation)) if break_on_bad: raise ValueError("Bad inference: new violation is" " weaker than previous constraint.") return True return False @classmethod def constraint_equal(cls, y_1, y_2): """ This now more complex. y_1 and/or y_2 (I think) can be: array, pair of arrays,
<filename>data/dataset.py #!/usr/bin/env python3 # Copyright 2018 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @title :dataset.py # @author :ch # @contact :<EMAIL> # @created :08/06/2018 # @version :1.0 # @python_version :3.6.6 """ Dataset Interface ----------------- The module :mod:`data.dataset` contains a template for a dataset interface, that can be used to feed data into neural networks. The implementation is based on an earlier implementation of a class I used in another project: https://git.io/fN1a6 At the moment, the class holds all data in memory and is therefore not meant for bigger datasets. Though, it is easy to design wrappers that overcome this limitation (e.g., see abstract base class :class:`data.large_img_dataset.LargeImgDataset`). .. 
autosummary:: data.dataset.Dataset.get_test_ids data.dataset.Dataset.get_train_ids data.dataset.Dataset.get_val_ids data.dataset.Dataset.get_test_inputs data.dataset.Dataset.get_test_outputs data.dataset.Dataset.get_train_inputs data.dataset.Dataset.get_train_outputs data.dataset.Dataset.get_val_inputs data.dataset.Dataset.get_val_outputs data.dataset.Dataset.input_to_torch_tensor data.dataset.Dataset.is_image_dataset data.dataset.Dataset.next_test_batch data.dataset.Dataset.next_train_batch data.dataset.Dataset.next_val_batch data.dataset.Dataset.test_iterator data.dataset.Dataset.train_iterator data.dataset.Dataset.val_iterator data.dataset.Dataset.output_to_torch_tensor data.dataset.Dataset.plot_samples data.dataset.Dataset.reset_batch_generator data.dataset.Dataset.tf_input_map data.dataset.Dataset.tf_output_map data.dataset.Dataset.test_ids_to_indices data.dataset.Dataset.train_ids_to_indices data.dataset.Dataset.val_ids_to_indices """ from abc import ABC, abstractmethod import numpy as np from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import numpy.matlib as npm class Dataset(ABC): """A general dataset template that can be used as a simple and consistent interface. Note, that this is an abstract class that should not be instantiated. In order to write an interface for another dataset, you have to implement an inherited class. You must always call the constructor of this base class first when instantiating the implemented subclass. Note, the internals are stored in the private member ``_data``, that is described in the constructor. Attributes: classification: Whether the dataset is a classification or regression dataset. sequence: Whether the dataset contains sequences (samples have temporal structure). In case of a sequence dataset, the temporal structure can be decoded via the shape attributes of in- and outputs. Note, that all samples are internally zero-padded to the same length. 
num_classes: The number of classes for a classification task (``None`` otherwise). is_one_hot: Whether output labels are one-hot encoded for a classification task (``None`` otherwise). in_shape: The original shape of an input sample. Note, that samples are encoded by this class as individual vectors (e.g., an MNIST sample is ancoded as 784 dimensional vector, but its original shape is: ``[28, 28, 1]``). A sequential sample is encoded by concatenating all timeframes. Hence, the number of timesteps can be decoded by dividing a single sample vector by np.prod(in_shape). out_shape: The original shape of an output sample (see :attr:`in_shape`). num_train_samples: The number of training samples. num_test_samples: The number of test samples. num_val_samples: The number of validation samples. shuffle_test_samples: Whether the method :meth:`next_test_batch` returns test samples in random order at every epoch. Defaults to ``True``, i.e., samples have a random ordering every epoch. shuffle_val_samples: Same as :attr:`shuffle_test_samples` for samples from the validation set. """ def __init__(self): # Internally, everything is stored in a certain structure, such that it # can easily be backuped (for instance via pickle). data = {} # Boolean: See attribute "classification". data.setdefault('classification', None) # Boolean: See attribute "sequence". data.setdefault('sequence', None) # Integer: See attribute "num_classes". data.setdefault('num_classes', None) # Integer: See attribute "is_one_hot". data.setdefault('is_one_hot', None) # A 2D numpy array, containing a sample input in each row (all samples # are encoded as single vectors.) data.setdefault('in_data', None) # A 2D numpy array, containing a sample output in each row (all samples # are encoded as single vectors.) data.setdefault('out_data', None) # List or numpy array: See attribute "in_shape". data.setdefault('in_shape', []) # List or numpy array: See attribute "in_shape". 
data.setdefault('out_shape', []) # List or numpy array: All row indices of "in_data" or "out_data", that # correspond to samples belonging to the training set. data.setdefault('train_inds', []) # List or numpy array: All row indices of "in_data" or "out_data", that # correspond to samples belonging to the test set. data.setdefault('test_inds', []) # List or numpy array: All row indices of "in_data" or "out_data", that # correspond to samples belonging to the validation set. data.setdefault('val_inds', None) self._data = data # These are other private attributes, that are not in the data dict # as there would be no reason to pickle them. self._batch_gen_train = None self._batch_gen_test = None self._batch_gen_val = None # We only need to fit the one-hot encoder for this dataset once. self._one_hot_encoder = None self._shuffle_test_samples = True self._shuffle_val_samples = True # TODO deprecate this attribute. Instead, distinguish between multi and # single label encoding. @property def classification(self): """Getter for read-only attribute :attr:`classification`.""" return self._data['classification'] @property def sequence(self): """Getter for read-only attribute :attr:`sequence`.""" return self._data['sequence'] @property def num_classes(self): """Getter for read-only attribute :attr:`num_classes`.""" return self._data['num_classes'] @property def is_one_hot(self): """Getter for read-only attribute :attr:`is_one_hot`.""" return self._data['is_one_hot'] @property def in_shape(self): """Getter for read-only attribute :attr:`in_shape`.""" return self._data['in_shape'] @property def out_shape(self): """Getter for read-only attribute :attr:`out_shape`.""" return self._data['out_shape'] @property def num_train_samples(self): """Getter for read-only attribute :attr:`num_train_samples`.""" return np.size(self._data['train_inds']) @property def num_test_samples(self): """Getter for read-only attribute :attr:`num_test_samples`.""" return np.size(self._data['test_inds']) 
@property def num_val_samples(self): """Getter for read-only attribute :attr:`num_val_samples`.""" if self._data['val_inds'] is None: return 0 return np.size(self._data['val_inds']) @property def shuffle_test_samples(self): """Getter attribute :attr:`shuffle_test_samples`.""" return self._shuffle_test_samples @shuffle_test_samples.setter def shuffle_test_samples(self, value): """Setter for attribute :attr:`shuffle_test_samples`. Note, a call to this method will reset the current generator, such that the next call to the method :meth:`next_test_batch` results in starting a sweep through a new epoch (full batch). """ self._shuffle_test_samples = value self._batch_gen_test = None @property def shuffle_val_samples(self): """Getter for attribute :attr:`shuffle_val_samples`.""" return self._shuffle_val_samples @shuffle_val_samples.setter def shuffle_val_samples(self, value): """Setter for attribute :attr:`shuffle_val_samples`. See documentation of setter for attribute :attr:`shuffle_test_samples`. """ self._shuffle_val_samples = value self._batch_gen_val = None def get_train_ids(self): """Get unique identifiers all training samples. Each sample in the dataset has a unique identifier (independent of the dataset split it is assigned to). Note: Sample identifiers do not correspond to the indices of samples within a dataset split (i.e., the returned identifiers of this method cannot be used as indices for the returned arrays of methods :meth:`get_train_inputs` and :meth:`get_train_outputs`) Returns: (numpy.ndarray): A 1D numpy array containing the unique identifiers for all training samples. """ return self._data['train_inds'] def get_test_ids(self): """Get unique identifiers all test samples. See documentation of method :meth:`get_train_ids` for details. Returns: (numpy.ndarray): A 1D numpy array. """ return self._data['test_inds'] def get_val_ids(self): """Get unique identifiers all validation samples. See documentation of method :meth:`get_train_ids` for details. 
Returns: (numpy.ndarray): A 1D numpy array. Returns ``None`` if no validation set exists. """ if self._data['val_inds'] is None: return None return self._data['val_inds'] def get_train_inputs(self): """Get the inputs of all training samples. Note, that each sample is encoded as a single vector. One may use the attribute :attr:`in_shape` to decode the actual shape of an input sample. Returns: (numpy.ndarray): A 2D numpy array, where each row encodes a training sample. """ return self._data['in_data'][self._data['train_inds'], :] def get_test_inputs(self): """Get the inputs of all test samples. See documentation of method :meth:`get_train_inputs` for details. Returns: (numpy.ndarray): A 2D numpy array. """ return self._data['in_data'][self._data['test_inds'], :] def get_val_inputs(self): """Get the inputs of all validation samples. See documentation of method :meth:`get_train_inputs` for details. Returns: (numpy.ndarray): A 2D numpy array. Returns ``None`` if no validation set exists. """ if self._data['val_inds'] is None: return None return self._data['in_data'][self._data['val_inds'], :] def get_train_outputs(self, use_one_hot=None): """Get the outputs (targets) of all training samples. Note, that each sample is encoded as a single vector. One may use the attribute :attr:`out_shape` to decode the actual shape of an output sample. Keep in mind, that classification samples might be one-hot encoded. Args: use_one_hot (bool): For classification samples, the encoding of the returned samples can be either "one-hot" or "class index". This option is ignored for datasets other than classification sets. If ``None``, the dataset its default encoding is returned. Returns: (numpy.ndarray): A 2D numpy array, where each row encodes a training target.
and there is at least one cased character in S, False otherwise. """ def isnumeric(): """S.isnumeric() -> bool Return True if there are only numeric characters in S, False otherwise. """ def isprintable(): """S.isprintable() -> bool Return True if all characters in S are considered printable in repr() or S is empty, False otherwise. """ def isspace(): """S.isspace() -> bool Return True if all characters in S are whitespace and there is at least one character in S, False otherwise. """ def istitle(): """S.istitle() -> bool Return True if S is a titlecased string and there is at least one character in S, i.e. upper- and titlecase characters may only follow uncased characters and lowercase characters only cased ones. Return False otherwise. """ def isupper(): """S.isupper() -> bool Return True if all cased characters in S are uppercase and there is at least one cased character in S, False otherwise. """ def join(): """S.join(iterable) -> unicode Return a string which is the concatenation of the strings in the iterable. The separator between elements is S. """ def ljust(): """S.ljust(width[, fillchar]) -> int Return S left-justified in a Unicode string of length width. Padding is done using the specified fill character (default is a space). """ def lower(): """S.lower() -> unicode Return a copy of the string S converted to lowercase. """ def lstrip(): """S.lstrip([chars]) -> unicode Return a copy of the string S with leading whitespace removed. If chars is given and not None, remove characters in chars instead. If chars is a str, it will be converted to unicode before stripping """ def maketrans(): """str.maketrans(x[, y[, z]]) -> dict (static method) Return a translation table usable for str.translate(). If there is only one argument, it must be a dictionary mapping Unicode ordinals (integers) or characters to Unicode ordinals, strings or None. Character keys will be then converted to ordinals. 
If there are two arguments, they must be strings of equal length, and in the resulting dictionary, each character in x will be mapped to the character at the same position in y. If there is a third argument, it must be a string, whose characters will be mapped to None in the result. """ def partition(): """S.partition(sep) -> (head, sep, tail) Search for the separator sep in S, and return the part before it, the separator itself, and the part after it. If the separator is not found, return S and two empty strings. """ def replace(): """S.replace(old, new[, count]) -> unicode Return a copy of S with all occurrences of substring old replaced by new. If the optional argument count is given, only the first count occurrences are replaced. """ def rfind(): """S.rfind(sub[, start[, end]]) -> int Return the highest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. """ def rindex(): """S.rindex(sub[, start[, end]]) -> int Like S.rfind() but raise ValueError when the substring is not found. """ def rjust(): """S.rjust(width[, fillchar]) -> unicode Return S right-justified in a Unicode string of length width. Padding is done using the specified fill character (default is a space). """ def rpartition(): """S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return the part before it, the separator itself, and the part after it. If the separator is not found, return two empty strings and S. """ def rsplit(): """S.rsplit(sep=None, maxsplit=-1) -> list of strings Return a list of the words in S, using sep as the delimiter string, starting at the end of the string and working to the front. If maxsplit is given, at most maxsplit splits are done. If sep is not specified, any whitespace string is a separator. 
""" def rstrip(): """S.rstrip([chars]) -> unicode Return a copy of the string S with trailing whitespace removed. If chars is given and not None, remove characters in chars instead. If chars is a str, it will be converted to unicode before stripping """ def split(): """S.split(sep=None, maxsplit=-1) -> list of strings Return a list of the words in S, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or is None, any whitespace string is a separator and empty strings are removed from the result. """ def splitlines(): """S.splitlines(keepends=False) -> list of strings Return a list of the lines in S, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true. """ def startswith(): """S.startswith(prefix[, start[, end]]) -> bool Return True if S starts with the specified prefix, False otherwise. With optional start, test S beginning at that position. With optional end, stop comparing S at that position. prefix can also be a tuple of strings to try. """ def strip(): """S.strip([chars]) -> unicode Return a copy of the string S with leading and trailing whitespace removed. If chars is given and not None, remove characters in chars instead. If chars is a str, it will be converted to unicode before stripping """ def swapcase(): """S.swapcase() -> unicode Return a copy of S with uppercase characters converted to lowercase and vice versa. """ def title(): """S.title() -> unicode Return a titlecased version of S, i.e. words start with title case characters, all remaining cased characters have lower case. """ def translate(): """S.translate(table) -> unicode Return a copy of the string S, where all characters have been mapped through the given translation table, which must be a mapping of Unicode ordinals to Unicode ordinals, Unicode strings or None. Unmapped characters are left untouched. Characters mapped to None are deleted. 
""" def upper(): """S.upper() -> unicode Return a copy of S converted to uppercase. """ def zfill(): """S.zfill(width) -> unicode Pad a numeric string S with zeros on the left, to fill a field of the specified width. The string S is never truncated. """ W_UnicodeObject.typedef = TypeDef( "str", __new__ = interp2app(W_UnicodeObject.descr_new), __doc__ = UnicodeDocstrings.__doc__, __repr__ = interp2app(W_UnicodeObject.descr_repr, doc=UnicodeDocstrings.__repr__.__doc__), __str__ = interp2app(W_UnicodeObject.descr_str, doc=UnicodeDocstrings.__str__.__doc__), __hash__ = interp2app(W_UnicodeObject.descr_hash, doc=UnicodeDocstrings.__hash__.__doc__), __eq__ = interp2app(W_UnicodeObject.descr_eq, doc=UnicodeDocstrings.__eq__.__doc__), __ne__ = interp2app(W_UnicodeObject.descr_ne, doc=UnicodeDocstrings.__ne__.__doc__), __lt__ = interp2app(W_UnicodeObject.descr_lt, doc=UnicodeDocstrings.__lt__.__doc__), __le__ = interp2app(W_UnicodeObject.descr_le, doc=UnicodeDocstrings.__le__.__doc__), __gt__ = interp2app(W_UnicodeObject.descr_gt, doc=UnicodeDocstrings.__gt__.__doc__), __ge__ = interp2app(W_UnicodeObject.descr_ge, doc=UnicodeDocstrings.__ge__.__doc__), __iter__ = interp2app(W_UnicodeObject.descr_iter, doc=UnicodeDocstrings.__iter__.__doc__), __len__ = interp2app(W_UnicodeObject.descr_len, doc=UnicodeDocstrings.__len__.__doc__), __contains__ = interp2app(W_UnicodeObject.descr_contains, doc=UnicodeDocstrings.__contains__.__doc__), __add__ = interp2app(W_UnicodeObject.descr_add, doc=UnicodeDocstrings.__add__.__doc__), __mul__ = interp2app(W_UnicodeObject.descr_mul, doc=UnicodeDocstrings.__mul__.__doc__), __rmul__ = interp2app(W_UnicodeObject.descr_mul, doc=UnicodeDocstrings.__rmul__.__doc__), __getitem__ = interp2app(W_UnicodeObject.descr_getitem, doc=UnicodeDocstrings.__getitem__.__doc__), capitalize = interp2app(W_UnicodeObject.descr_capitalize, doc=UnicodeDocstrings.capitalize.__doc__), casefold = interp2app(W_UnicodeObject.descr_casefold, 
doc=UnicodeDocstrings.casefold.__doc__), center = interp2app(W_UnicodeObject.descr_center, doc=UnicodeDocstrings.center.__doc__), count = interp2app(W_UnicodeObject.descr_count, doc=UnicodeDocstrings.count.__doc__), encode = interp2app(W_UnicodeObject.descr_encode, doc=UnicodeDocstrings.encode.__doc__), expandtabs = interp2app(W_UnicodeObject.descr_expandtabs, doc=UnicodeDocstrings.expandtabs.__doc__), find = interp2app(W_UnicodeObject.descr_find, doc=UnicodeDocstrings.find.__doc__), rfind = interp2app(W_UnicodeObject.descr_rfind, doc=UnicodeDocstrings.rfind.__doc__), index = interp2app(W_UnicodeObject.descr_index, doc=UnicodeDocstrings.index.__doc__), rindex = interp2app(W_UnicodeObject.descr_rindex, doc=UnicodeDocstrings.rindex.__doc__), isalnum = interp2app(W_UnicodeObject.descr_isalnum, doc=UnicodeDocstrings.isalnum.__doc__), isalpha = interp2app(W_UnicodeObject.descr_isalpha, doc=UnicodeDocstrings.isalpha.__doc__), isdecimal = interp2app(W_UnicodeObject.descr_isdecimal, doc=UnicodeDocstrings.isdecimal.__doc__), isdigit = interp2app(W_UnicodeObject.descr_isdigit, doc=UnicodeDocstrings.isdigit.__doc__), isidentifier = interp2app(W_UnicodeObject.descr_isidentifier, doc=UnicodeDocstrings.isidentifier.__doc__), islower = interp2app(W_UnicodeObject.descr_islower, doc=UnicodeDocstrings.islower.__doc__), isnumeric = interp2app(W_UnicodeObject.descr_isnumeric, doc=UnicodeDocstrings.isnumeric.__doc__), isprintable = interp2app(W_UnicodeObject.descr_isprintable, doc=UnicodeDocstrings.isprintable.__doc__), isspace = interp2app(W_UnicodeObject.descr_isspace, doc=UnicodeDocstrings.isspace.__doc__), istitle = interp2app(W_UnicodeObject.descr_istitle, doc=UnicodeDocstrings.istitle.__doc__), isupper = interp2app(W_UnicodeObject.descr_isupper, doc=UnicodeDocstrings.isupper.__doc__), join = interp2app(W_UnicodeObject.descr_join, doc=UnicodeDocstrings.join.__doc__), ljust = interp2app(W_UnicodeObject.descr_ljust, doc=UnicodeDocstrings.ljust.__doc__), rjust = 
interp2app(W_UnicodeObject.descr_rjust, doc=UnicodeDocstrings.rjust.__doc__), lower = interp2app(W_UnicodeObject.descr_lower, doc=UnicodeDocstrings.lower.__doc__), partition = interp2app(W_UnicodeObject.descr_partition, doc=UnicodeDocstrings.partition.__doc__), rpartition = interp2app(W_UnicodeObject.descr_rpartition, doc=UnicodeDocstrings.rpartition.__doc__), replace = interp2app(W_UnicodeObject.descr_replace, doc=UnicodeDocstrings.replace.__doc__), split = interp2app(W_UnicodeObject.descr_split, doc=UnicodeDocstrings.split.__doc__), rsplit = interp2app(W_UnicodeObject.descr_rsplit, doc=UnicodeDocstrings.rsplit.__doc__), splitlines = interp2app(W_UnicodeObject.descr_splitlines, doc=UnicodeDocstrings.splitlines.__doc__), startswith = interp2app(W_UnicodeObject.descr_startswith, doc=UnicodeDocstrings.startswith.__doc__), endswith = interp2app(W_UnicodeObject.descr_endswith, doc=UnicodeDocstrings.endswith.__doc__), strip = interp2app(W_UnicodeObject.descr_strip, doc=UnicodeDocstrings.strip.__doc__), lstrip = interp2app(W_UnicodeObject.descr_lstrip, doc=UnicodeDocstrings.lstrip.__doc__), rstrip = interp2app(W_UnicodeObject.descr_rstrip, doc=UnicodeDocstrings.rstrip.__doc__), swapcase = interp2app(W_UnicodeObject.descr_swapcase, doc=UnicodeDocstrings.swapcase.__doc__), title = interp2app(W_UnicodeObject.descr_title, doc=UnicodeDocstrings.title.__doc__), translate = interp2app(W_UnicodeObject.descr_translate, doc=UnicodeDocstrings.translate.__doc__), upper = interp2app(W_UnicodeObject.descr_upper, doc=UnicodeDocstrings.upper.__doc__), zfill = interp2app(W_UnicodeObject.descr_zfill, doc=UnicodeDocstrings.zfill.__doc__), format = interp2app(W_UnicodeObject.descr_format, doc=UnicodeDocstrings.format.__doc__), format_map = interp2app(W_UnicodeObject.descr_format_map, doc=UnicodeDocstrings.format_map.__doc__), __format__ = interp2app(W_UnicodeObject.descr__format__, doc=UnicodeDocstrings.__format__.__doc__), __mod__ = interp2app(W_UnicodeObject.descr_mod, 
doc=UnicodeDocstrings.__mod__.__doc__), __rmod__ = interp2app(W_UnicodeObject.descr_rmod, doc=UnicodeDocstrings.__rmod__.__doc__), __getnewargs__ = interp2app(W_UnicodeObject.descr_getnewargs, doc=UnicodeDocstrings.__getnewargs__.__doc__), maketrans = interp2app(W_UnicodeObject.descr_maketrans, as_classmethod=True, doc=UnicodeDocstrings.maketrans.__doc__), ) W_UnicodeObject.typedef.flag_sequence_bug_compat = True def _create_list_from_unicode(value): # need this helper function to allow the jit to look inside and inline
# NOTE(review): original line breaks were lost in this region; the code below is kept
# byte-identical and only standalone comment lines were inserted (never inside the
# multi-line string literals that span physical lines). The fragment belongs to a
# crypto target/feature module: it starts inside a target-labeling loop (BUY/SELL
# threshold monitoring), then the core of calc_features_and_targets, and continues
# into class HistorySets. Presumably part of class TargetsFeatures — it is
# instantiated as TargetsFeatures(base, QUOTE) further down; TODO confirm.
reset win monitor -> dip exceeded threshold df.iat[lossix[slot], lix] = TARGETS[SELL] if (lossix[slot] + time_agg) > tix: break lossix[slot] += time_agg loss[slot] = close_delta_ratio(lossix[slot], tix, cix) elif delta > 0: if win[slot] > 0: # win monitoring is running win[slot] = close_delta_ratio(winix[slot], tix, cix) else: # first time bar of increase period winix[slot] = tix win[slot] = delta if loss[slot] < 0: # loss monitoring is running loss[slot] = close_delta_ratio(lossix[slot], tix, cix) if loss[slot] > 0: loss[slot] = 0. # reset loss monitor -> recovered before sell threshold while win[slot] > BUY_THRESHOLD: loss[slot] = 0. # reset win monitor -> dip exceeded threshold df.iat[winix[slot], lix] = TARGETS[BUY] if (winix[slot] + time_agg) > tix: break winix[slot] += time_agg win[slot] = close_delta_ratio(winix[slot], tix, cix) # report_setsize("complete set", df) # here comes the core of calc_features_and_targets if minute_dataframe is None: if self.minute_data is None: raise MissingHistoryData("{}–{} target {}min without minute data ({})".format( self.base, self.quote, self.target_key, self.vec)) else: self.minute_data = minute_dataframe if self.minute_data.empty is None: self.minute_data = None raise MissingHistoryData("{}–{} target {}min with empty minute data".format( self.base, self.quote, self.target_key)) tf_aggs = calc_aggregation(self.minute_data, TIME_AGGS) if "target" not in self.minute_data: add_targets(self.target_key, tf_aggs[self.target_key]) # add aggregation targets self.minute_data["target"] = tf_aggs[self.target_key]["target"] # print("calculating targets") else: # print("reusing targets") pass self.vec = expand_target_feature_vectors(tf_aggs, self.target_key) if "target" not in self.vec: self.vec["target"] = self.minute_data["target"] # print(f"{len(self.vec)} feature vectors of {len(self.vec.iloc[0])-2} features") def append_minute_df_with_targets(self, minute_df): self.vec = None if "target" not in minute_df: raise
# NOTE(review): the dangling `raise` above is completed by the ValueError on the next
# line. The next stretch holds append_minute_df_with_targets (concatenates minute
# data), target_performance (replays BUY/SELL signals against close prices, charging
# FEE per trade while "holding"), and the head of class HistorySets, whose __init__
# docstring continues across the following physical lines (no comments inserted there).
ValueError("append_minute_df_with_targets: missing target column") if self.minute_data is None: self.minute_data = minute_df else: self.minute_data = pd.concat([self.minute_data, minute_df], sort=False) def target_performance(self): """calculates the time aggregation specific performance of target_key """ # print(f"{datetime.now()}: calculate target_performance") target_df = self.minute_data perf = 0. ta_holding = False col_ix = target_df.columns.get_loc("target") assert col_ix > 0, f"did not find column {col_ix} of {self.target_key}" close_ix = target_df.columns.get_loc("close") assert target_df.index.is_unique, "unexpected not unique index" last = target_df.iat[0, close_ix] for tix in range(len(target_df)): # tix = time index this = target_df.iat[tix, close_ix] tix_perf = ((this - last) / last) # no longer in per mille * 1000) last = this signal = target_df.iat[tix, col_ix] if ta_holding: perf += tix_perf if (signal == TARGETS[BUY]) and (not ta_holding): perf -= FEE ta_holding = True if (signal == TARGETS[SELL]) and ta_holding: perf -= FEE ta_holding = False return perf class HistorySets: """Container class for targets and features of a currency pair timeblock is the time window in minutes that is analyzed equally for all bases to avoid label leakage. timeblocks are then distributed to train, validate and test to balance buy and sell signals. """ def __init__(self, sets_config_fname): """Uses history data of baselist/USDT as history set to train and evaluate.
training control: ================= - df[common timeline index, base, target, training_count, buy_prob, sell_prop, hold_prop, train_next] - step within an epoch, every step-th class of a sym is used - tcount is incremented with every training cycle usage - buy_prob, sell_prop, hold_prop are the class probabilities of the last evaluation - use is an earmark that this sample shall be used for the next training epoch/validation """ self.bases = dict.fromkeys(BASES, None) self.max_steps = dict.fromkeys(BASES) for base in self.max_steps: self.max_steps[base] = {HOLD: 0, BUY: 0, SELL: 0, "max": 0} self.max_steps["total"] = 0 self.timeblock = 4*7*24*60 # time window in minutes that is analyzed equally for all bases self.fixtic = None # tic as fixpoint for timeblock self.analysis = pd.DataFrame(columns=["sym", "set_type", "start", "end", "tics", "buys", "sells", "avg_vol", "novol_count"]) self.ctrl = dict() self.ctrl[TRAIN] = pd.DataFrame(columns=["sym", "timestamp", "target", "use", "buy_prob", "sell_prop", "hold_prop", "step", "tcount"]) self.ctrl[VAL] = pd.DataFrame(columns=["sym", "timestamp", "target", "use", "buy_prob", "sell_prop", "hold_prop"]) self.ctrl[TEST] = pd.DataFrame(columns=["sym", "timestamp", "target", "use", "buy_prob", "sell_prop", "hold_prop"]) self.last_base = None # for base in self.bases: # merge_asset_dataframe(DATA_PATH, base) self.load_sets_config(sets_config_fname) assert not self.analysis.empty, f"{timestr()}: missing sets config" report_setsize(TRAIN, self.ctrl[TRAIN]) report_setsize(VAL, self.ctrl[VAL]) report_setsize(TEST, self.ctrl[TEST]) # self.analyze_bases() def set_of_type(self, base, set_type): sym = base + "_" + QUOTE if SMALLER_16GB_RAM and (self.last_base != base): self.release_features_of_base(self.last_base) try: base_df = self.ctrl[set_type].loc[(self.ctrl[set_type].sym == sym) & (self.ctrl[set_type].use is True)] # print(f"{set_type} set with {len(base_df)} samples for {sym}") return base_df except KeyError: print(f"no
{self.set_type} set for {sym}") pass def trainset_step(self, base, step): sym = base + "_" + QUOTE if SMALLER_16GB_RAM and (self.last_base != base): self.release_features_of_base(self.last_base) try: hold_step = step % self.max_steps[base][HOLD] buy_step = step % self.max_steps[base][BUY] sell_step = step % self.max_steps[base][SELL] base_df = self.ctrl[TRAIN].loc[(self.ctrl[TRAIN].sym == sym) & (((self.ctrl[TRAIN].target == TARGETS[HOLD]) & (self.ctrl[TRAIN].step == hold_step)) | ((self.ctrl[TRAIN].target == TARGETS[BUY]) & (self.ctrl[TRAIN].step == buy_step)) | ((self.ctrl[TRAIN].target == TARGETS[SELL]) & (self.ctrl[TRAIN].step == sell_step))) & (self.ctrl[TRAIN].use is True)] # report_setsize(f"{sym} {TRAIN} set step {step}", base_df) return base_df except KeyError: print(f"no {self.set_type} set for {sym}") pass def load_sets_config(self, config_fname): def use_settype_total(): """Uses the set_type of "total" and apllies it to all sets with such timeblock. """ cdf = self.analysis.set_index("end") cdf["set_type"] = cdf.loc[cdf.sym == "total"]["set_type"] # cdf["end"] = cdf.index # is already doen by reset_index self.analysis = cdf.reset_index() try: self.analysis = pd.read_csv(config_fname, skipinitialspace=True, sep="\t") except IOError: print(f"pd.read_csv({config_fname}) IO error") return None # use_settype_total() # self.analysis.to_csv(config_fname, sep="\t", index=False) self.prepare_training() def features_from_targets(self, df, base, set_type, step): if df.empty: raise NoSubsetWarning("empty {} subset for {}".format(set_type, base)) sym = df.at[df.index[0], "sym"] df_base = base_of_sym(sym) if base != df_base: raise ValueError(f"features_from_targets: base(df)={df_base} != base={base}") tfv = self.get_targets_features_of_base(base) try: subset_df = targets_to_features(tfv.vec, df) except NoSubsetWarning as msg: print("features_from_targets {} {} set step {}: {}".format( base, set_type, step, msg)) raise descr = "{} {} {} set step {}:
{}".format(timestr(), base, set_type, step, str_setsize(subset_df)) # print(descr) samples = to_scikitlearn(subset_df, np_data=None, descr=descr) return samples def prepare_training(self): """Prepares training, validation and test sets with targets and admin info (see class description). These determine the samples per set_type and whether they are used in a step. It is assumed that load_sets_config was called and self.analysis contains the proper config fiel content. """ def samples_concat(target, to_be_added): if target.empty: target = to_be_added # print("target empty --> target = to_be_added", target.head(), target.tail()) return to_be_added if False: # debugging output elen = len(target) xdf = target.tail() if ("step" in xdf.columns): xdf = xdf[["target", "timestamp", "step"]] else: xdf = xdf[["target", "timestamp"]] print(f"target len: {elen}", xdf) ydf = to_be_added.head() if ("step" in ydf.columns): ydf = ydf[["target", "timestamp", "step"]] else: ydf = ydf[["target", "timestamp"]] print(f"time agg timeblock len: {len(to_be_added)}", ydf) target = pd.concat([target, to_be_added], sort=False) if False: # debugging output zdf = target.iloc[range(elen-5, elen+5)] elen = len(target) if ("step" in zdf.columns): zdf = zdf[["target", "timestamp", "step"]] else: zdf = zdf[["target", "timestamp"]] print(f"concat with new len {elen} result at interface: ", zdf) return target def extract_set_type_targets(base, tf, set_type): sym = base + "_" + QUOTE try: # print(f"extracting {set_type} for {sym}") dfcfg = self.analysis.loc[(self.analysis.set_type == set_type) & (self.analysis.sym == sym)] except KeyError: print(f"no {set_type} set for {sym}") return None dft = tf.minute_data extract = None for block in dfcfg.index: df = dft.loc[(dft.index >= dfcfg.at[block, "start"]) & (dft.index <= dfcfg.at[block, "end"]), ["target", "close"]] df["timestamp"] = df.index df["sym"] = sym df["use"] = True df["buy_prob"] = float(0) df["sell_prop"] = float(0) df["hold_prop"] = float(0) if
# NOTE(review): the dangling `if` above continues with `set_type == TRAIN` below.
# The remainder covers the core loop of prepare_training (per-base extraction of
# TRAIN/VAL/TEST targets filtered to rows present in tf.vec), plus the cache
# accessors get_targets_features_of_base / release_features_of_base; the final
# register_probabilties is truncated mid-statement (open bracket) by the chunk edge.
set_type == TRAIN: df["tcount"] = int(0) df["step"] = int(0) if extract is None: extract = df else: extract = samples_concat(extract, df) return extract # here comes the core of prepare_training() for base in self.bases: tf = TargetsFeatures(base, QUOTE) try: tf.load_classifier_features() except MissingHistoryData: continue self.bases[base] = tf tfv = tf.vec tdf = extract_set_type_targets(base, tf, TRAIN) tdf = tdf[tdf.index.isin(tfv.index)] self.ctrl[TRAIN] = samples_concat(self.ctrl[TRAIN], tdf) vdf = extract_set_type_targets(base, tf, VAL) vdf = vdf[vdf.index.isin(tfv.index)] self.ctrl[VAL] = samples_concat(self.ctrl[VAL], vdf) tstdf = extract_set_type_targets(base, tf, TEST) tstdf = tstdf[tstdf.index.isin(tfv.index)] self.ctrl[TEST] = samples_concat(self.ctrl[TEST], tstdf) def get_targets_features_of_base(self, base): if base not in self.bases: raise KeyError() tf = self.bases[base] if tf is None: tf = TargetsFeatures(base, QUOTE) tf.load_classifier_features() if tf is not None: if tf.vec is None: try: tf.calc_features_and_targets(None) except MissingHistoryData as msg: print(f"get_targets_features_of_base {base}: {msg}") return tf def release_features_of_base(self, base): if base in self.bases: tf = self.bases[base] if tf is not None: tf.vec = None def register_probabilties(self, base, set_type, pred, target_df): df = self.ctrl[set_type] tdf = target_df sym = base + "_" + QUOTE df.loc[df.index.isin(tdf.index)
<filename>tests/examples/minlplib/pooling_foulds5tp.py<gh_stars>1-10 # NLP written by GAMS Convert at 04/21/18 13:53:11 # # Equation counts # Total E G L N X C B # 564 517 0 47 0 0 0 0 # # Variable counts # x b i s1s s2s sc si # Total cont binary integer sos1 sos2 scont sint # 609 609 0 0 0 0 0 0 # FX 0 0 0 0 0 0 0 0 # # Nonzero counts # Total const NL DLL # 4112 3088 1024 0 # # Reformulation has removed 1 variable and 1 equation from pyomo.environ import * model = m = ConcreteModel() m.x2 = Var(within=Reals,bounds=(0,1),initialize=0) m.x3 = Var(within=Reals,bounds=(0,1),initialize=0) m.x4 = Var(within=Reals,bounds=(0,1),initialize=0) m.x5 = Var(within=Reals,bounds=(0,1),initialize=0) m.x6 = Var(within=Reals,bounds=(0,1),initialize=0) m.x7 = Var(within=Reals,bounds=(0,1),initialize=0) m.x8 = Var(within=Reals,bounds=(0,1),initialize=0) m.x9 = Var(within=Reals,bounds=(0,1),initialize=0) m.x10 = Var(within=Reals,bounds=(0,1),initialize=0) m.x11 = Var(within=Reals,bounds=(0,1),initialize=0) m.x12 = Var(within=Reals,bounds=(0,1),initialize=0) m.x13 = Var(within=Reals,bounds=(0,1),initialize=0) m.x14 = Var(within=Reals,bounds=(0,1),initialize=0) m.x15 = Var(within=Reals,bounds=(0,1),initialize=0) m.x16 = Var(within=Reals,bounds=(0,1),initialize=0) m.x17 = Var(within=Reals,bounds=(0,1),initialize=0) m.x18 = Var(within=Reals,bounds=(0,1),initialize=0) m.x19 = Var(within=Reals,bounds=(0,1),initialize=0) m.x20 = Var(within=Reals,bounds=(0,1),initialize=0) m.x21 = Var(within=Reals,bounds=(0,1),initialize=0) m.x22 = Var(within=Reals,bounds=(0,1),initialize=0) m.x23 = Var(within=Reals,bounds=(0,1),initialize=0) m.x24 = Var(within=Reals,bounds=(0,1),initialize=0) m.x25 = Var(within=Reals,bounds=(0,1),initialize=0) m.x26 = Var(within=Reals,bounds=(0,1),initialize=0) m.x27 = Var(within=Reals,bounds=(0,1),initialize=0) m.x28 = Var(within=Reals,bounds=(0,1),initialize=0) m.x29 = Var(within=Reals,bounds=(0,1),initialize=0) m.x30 = Var(within=Reals,bounds=(0,1),initialize=0) m.x31 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x32 = Var(within=Reals,bounds=(0,1),initialize=0) m.x33 = Var(within=Reals,bounds=(0,1),initialize=0) m.x34 = Var(within=Reals,bounds=(0,1),initialize=0) m.x35 = Var(within=Reals,bounds=(0,1),initialize=0) m.x36 = Var(within=Reals,bounds=(0,1),initialize=0) m.x37 = Var(within=Reals,bounds=(0,1),initialize=0) m.x38 = Var(within=Reals,bounds=(0,1),initialize=0) m.x39 = Var(within=Reals,bounds=(0,1),initialize=0) m.x40 = Var(within=Reals,bounds=(0,1),initialize=0) m.x41 = Var(within=Reals,bounds=(0,1),initialize=0) m.x42 = Var(within=Reals,bounds=(0,1),initialize=0) m.x43 = Var(within=Reals,bounds=(0,1),initialize=0) m.x44 = Var(within=Reals,bounds=(0,1),initialize=0) m.x45 = Var(within=Reals,bounds=(0,1),initialize=0) m.x46 = Var(within=Reals,bounds=(0,1),initialize=0) m.x47 = Var(within=Reals,bounds=(0,1),initialize=0) m.x48 = Var(within=Reals,bounds=(0,1),initialize=0) m.x49 = Var(within=Reals,bounds=(0,1),initialize=0) m.x50 = Var(within=Reals,bounds=(0,1),initialize=0) m.x51 = Var(within=Reals,bounds=(0,1),initialize=0) m.x52 = Var(within=Reals,bounds=(0,1),initialize=0) m.x53 = Var(within=Reals,bounds=(0,1),initialize=0) m.x54 = Var(within=Reals,bounds=(0,1),initialize=0) m.x55 = Var(within=Reals,bounds=(0,1),initialize=0) m.x56 = Var(within=Reals,bounds=(0,1),initialize=0) m.x57 = Var(within=Reals,bounds=(0,1),initialize=0) m.x58 = Var(within=Reals,bounds=(0,1),initialize=0) m.x59 = Var(within=Reals,bounds=(0,1),initialize=0) m.x60 = Var(within=Reals,bounds=(0,1),initialize=0) m.x61 = Var(within=Reals,bounds=(0,1),initialize=0) m.x62 = Var(within=Reals,bounds=(0,1),initialize=0) m.x63 = Var(within=Reals,bounds=(0,1),initialize=0) m.x64 = Var(within=Reals,bounds=(0,1),initialize=0) m.x65 = Var(within=Reals,bounds=(0,1),initialize=0) m.x66 = Var(within=Reals,bounds=(0,1),initialize=0) m.x67 = Var(within=Reals,bounds=(0,1),initialize=0) m.x68 = Var(within=Reals,bounds=(0,1),initialize=0) m.x69 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x70 = Var(within=Reals,bounds=(0,1),initialize=0) m.x71 = Var(within=Reals,bounds=(0,1),initialize=0) m.x72 = Var(within=Reals,bounds=(0,1),initialize=0) m.x73 = Var(within=Reals,bounds=(0,1),initialize=0) m.x74 = Var(within=Reals,bounds=(0,1),initialize=0) m.x75 = Var(within=Reals,bounds=(0,1),initialize=0) m.x76 = Var(within=Reals,bounds=(0,1),initialize=0) m.x77 = Var(within=Reals,bounds=(0,1),initialize=0) m.x78 = Var(within=Reals,bounds=(0,1),initialize=0) m.x79 = Var(within=Reals,bounds=(0,1),initialize=0) m.x80 = Var(within=Reals,bounds=(0,1),initialize=0) m.x81 = Var(within=Reals,bounds=(0,1),initialize=0) m.x82 = Var(within=Reals,bounds=(0,1),initialize=0) m.x83 = Var(within=Reals,bounds=(0,1),initialize=0) m.x84 = Var(within=Reals,bounds=(0,1),initialize=0) m.x85 = Var(within=Reals,bounds=(0,1),initialize=0) m.x86 = Var(within=Reals,bounds=(0,1),initialize=0) m.x87 = Var(within=Reals,bounds=(0,1),initialize=0) m.x88 = Var(within=Reals,bounds=(0,1),initialize=0) m.x89 = Var(within=Reals,bounds=(0,1),initialize=0) m.x90 = Var(within=Reals,bounds=(0,1),initialize=0) m.x91 = Var(within=Reals,bounds=(0,1),initialize=0) m.x92 = Var(within=Reals,bounds=(0,1),initialize=0) m.x93 = Var(within=Reals,bounds=(0,1),initialize=0) m.x94 = Var(within=Reals,bounds=(0,1),initialize=0) m.x95 = Var(within=Reals,bounds=(0,1),initialize=0) m.x96 = Var(within=Reals,bounds=(0,1),initialize=0) m.x97 = Var(within=Reals,bounds=(0,1),initialize=0) m.x98 = Var(within=Reals,bounds=(0,1),initialize=0) m.x99 = Var(within=Reals,bounds=(0,1),initialize=0) m.x100 = Var(within=Reals,bounds=(0,1),initialize=0) m.x101 = Var(within=Reals,bounds=(0,1),initialize=0) m.x102 = Var(within=Reals,bounds=(0,1),initialize=0) m.x103 = Var(within=Reals,bounds=(0,1),initialize=0) m.x104 = Var(within=Reals,bounds=(0,1),initialize=0) m.x105 = Var(within=Reals,bounds=(0,1),initialize=0) m.x106 = Var(within=Reals,bounds=(0,1),initialize=0) m.x107 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x108 = Var(within=Reals,bounds=(0,1),initialize=0) m.x109 = Var(within=Reals,bounds=(0,1),initialize=0) m.x110 = Var(within=Reals,bounds=(0,1),initialize=0) m.x111 = Var(within=Reals,bounds=(0,1),initialize=0) m.x112 = Var(within=Reals,bounds=(0,1),initialize=0) m.x113 = Var(within=Reals,bounds=(0,1),initialize=0) m.x114 = Var(within=Reals,bounds=(0,1),initialize=0) m.x115 = Var(within=Reals,bounds=(0,1),initialize=0) m.x116 = Var(within=Reals,bounds=(0,1),initialize=0) m.x117 = Var(within=Reals,bounds=(0,1),initialize=0) m.x118 = Var(within=Reals,bounds=(0,1),initialize=0) m.x119 = Var(within=Reals,bounds=(0,1),initialize=0) m.x120 = Var(within=Reals,bounds=(0,1),initialize=0) m.x121 = Var(within=Reals,bounds=(0,1),initialize=0) m.x122 = Var(within=Reals,bounds=(0,1),initialize=0) m.x123 = Var(within=Reals,bounds=(0,1),initialize=0) m.x124 = Var(within=Reals,bounds=(0,1),initialize=0) m.x125 = Var(within=Reals,bounds=(0,1),initialize=0) m.x126 = Var(within=Reals,bounds=(0,1),initialize=0) m.x127 = Var(within=Reals,bounds=(0,1),initialize=0) m.x128 = Var(within=Reals,bounds=(0,1),initialize=0) m.x129 = Var(within=Reals,bounds=(0,1),initialize=0) m.x130 = Var(within=Reals,bounds=(0,1),initialize=0) m.x131 = Var(within=Reals,bounds=(0,1),initialize=0) m.x132 = Var(within=Reals,bounds=(0,1),initialize=0) m.x133 = Var(within=Reals,bounds=(0,1),initialize=0) m.x134 = Var(within=Reals,bounds=(0,1),initialize=0) m.x135 = Var(within=Reals,bounds=(0,1),initialize=0) m.x136 = Var(within=Reals,bounds=(0,1),initialize=0) m.x137 = Var(within=Reals,bounds=(0,1),initialize=0) m.x138 = Var(within=Reals,bounds=(0,1),initialize=0) m.x139 = Var(within=Reals,bounds=(0,1),initialize=0) m.x140 = Var(within=Reals,bounds=(0,1),initialize=0) m.x141 = Var(within=Reals,bounds=(0,1),initialize=0) m.x142 = Var(within=Reals,bounds=(0,1),initialize=0) m.x143 = Var(within=Reals,bounds=(0,1),initialize=0) m.x144 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x145 = Var(within=Reals,bounds=(0,1),initialize=0) m.x146 = Var(within=Reals,bounds=(0,1),initialize=0) m.x147 = Var(within=Reals,bounds=(0,1),initialize=0) m.x148 = Var(within=Reals,bounds=(0,1),initialize=0) m.x149 = Var(within=Reals,bounds=(0,1),initialize=0) m.x150 = Var(within=Reals,bounds=(0,1),initialize=0) m.x151 = Var(within=Reals,bounds=(0,1),initialize=0) m.x152 = Var(within=Reals,bounds=(0,1),initialize=0) m.x153 = Var(within=Reals,bounds=(0,1),initialize=0) m.x154 = Var(within=Reals,bounds=(0,1),initialize=0) m.x155 = Var(within=Reals,bounds=(0,1),initialize=0) m.x156 = Var(within=Reals,bounds=(0,1),initialize=0) m.x157 = Var(within=Reals,bounds=(0,1),initialize=0) m.x158 = Var(within=Reals,bounds=(0,1),initialize=0) m.x159 = Var(within=Reals,bounds=(0,1),initialize=0) m.x160 = Var(within=Reals,bounds=(0,1),initialize=0) m.x161 = Var(within=Reals,bounds=(0,1),initialize=0) m.x162 = Var(within=Reals,bounds=(0,1),initialize=0) m.x163 = Var(within=Reals,bounds=(0,1),initialize=0) m.x164 = Var(within=Reals,bounds=(0,1),initialize=0) m.x165 = Var(within=Reals,bounds=(0,1),initialize=0) m.x166 = Var(within=Reals,bounds=(0,1),initialize=0) m.x167 = Var(within=Reals,bounds=(0,1),initialize=0) m.x168 = Var(within=Reals,bounds=(0,1),initialize=0) m.x169 = Var(within=Reals,bounds=(0,1),initialize=0) m.x170 = Var(within=Reals,bounds=(0,1),initialize=0) m.x171 = Var(within=Reals,bounds=(0,1),initialize=0) m.x172 = Var(within=Reals,bounds=(0,1),initialize=0) m.x173 = Var(within=Reals,bounds=(0,1),initialize=0) m.x174 = Var(within=Reals,bounds=(0,1),initialize=0) m.x175 = Var(within=Reals,bounds=(0,1),initialize=0) m.x176 = Var(within=Reals,bounds=(0,1),initialize=0) m.x177 = Var(within=Reals,bounds=(0,1),initialize=0) m.x178 = Var(within=Reals,bounds=(0,1),initialize=0) m.x179 = Var(within=Reals,bounds=(0,1),initialize=0) m.x180 = Var(within=Reals,bounds=(0,1),initialize=0) m.x181 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x182 = Var(within=Reals,bounds=(0,1),initialize=0) m.x183 = Var(within=Reals,bounds=(0,1),initialize=0) m.x184 = Var(within=Reals,bounds=(0,1),initialize=0) m.x185 = Var(within=Reals,bounds=(0,1),initialize=0) m.x186 = Var(within=Reals,bounds=(0,1),initialize=0) m.x187 = Var(within=Reals,bounds=(0,1),initialize=0) m.x188 = Var(within=Reals,bounds=(0,1),initialize=0) m.x189 = Var(within=Reals,bounds=(0,1),initialize=0) m.x190 = Var(within=Reals,bounds=(0,1),initialize=0) m.x191 = Var(within=Reals,bounds=(0,1),initialize=0) m.x192 = Var(within=Reals,bounds=(0,1),initialize=0) m.x193 = Var(within=Reals,bounds=(0,1),initialize=0) m.x194 = Var(within=Reals,bounds=(0,1),initialize=0) m.x195 = Var(within=Reals,bounds=(0,1),initialize=0) m.x196 = Var(within=Reals,bounds=(0,1),initialize=0) m.x197 = Var(within=Reals,bounds=(0,1),initialize=0) m.x198 = Var(within=Reals,bounds=(0,1),initialize=0) m.x199 = Var(within=Reals,bounds=(0,1),initialize=0) m.x200 = Var(within=Reals,bounds=(0,1),initialize=0) m.x201 = Var(within=Reals,bounds=(0,1),initialize=0) m.x202 = Var(within=Reals,bounds=(0,1),initialize=0) m.x203 = Var(within=Reals,bounds=(0,1),initialize=0) m.x204 = Var(within=Reals,bounds=(0,1),initialize=0) m.x205 = Var(within=Reals,bounds=(0,1),initialize=0) m.x206 = Var(within=Reals,bounds=(0,1),initialize=0) m.x207 = Var(within=Reals,bounds=(0,1),initialize=0) m.x208 = Var(within=Reals,bounds=(0,1),initialize=0) m.x209 = Var(within=Reals,bounds=(0,1),initialize=0) m.x210 = Var(within=Reals,bounds=(0,1),initialize=0) m.x211 = Var(within=Reals,bounds=(0,1),initialize=0) m.x212 = Var(within=Reals,bounds=(0,1),initialize=0) m.x213 = Var(within=Reals,bounds=(0,1),initialize=0) m.x214 = Var(within=Reals,bounds=(0,1),initialize=0) m.x215 = Var(within=Reals,bounds=(0,1),initialize=0) m.x216 = Var(within=Reals,bounds=(0,1),initialize=0) m.x217 = Var(within=Reals,bounds=(0,1),initialize=0) m.x218 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x219 = Var(within=Reals,bounds=(0,1),initialize=0) m.x220 = Var(within=Reals,bounds=(0,1),initialize=0) m.x221 = Var(within=Reals,bounds=(0,1),initialize=0) m.x222 = Var(within=Reals,bounds=(0,1),initialize=0) m.x223 = Var(within=Reals,bounds=(0,1),initialize=0) m.x224 = Var(within=Reals,bounds=(0,1),initialize=0) m.x225 = Var(within=Reals,bounds=(0,1),initialize=0) m.x226 = Var(within=Reals,bounds=(0,1),initialize=0) m.x227 = Var(within=Reals,bounds=(0,1),initialize=0) m.x228 = Var(within=Reals,bounds=(0,1),initialize=0) m.x229 = Var(within=Reals,bounds=(0,1),initialize=0) m.x230 = Var(within=Reals,bounds=(0,1),initialize=0) m.x231 = Var(within=Reals,bounds=(0,1),initialize=0) m.x232 = Var(within=Reals,bounds=(0,1),initialize=0) m.x233 = Var(within=Reals,bounds=(0,1),initialize=0) m.x234 = Var(within=Reals,bounds=(0,1),initialize=0) m.x235 = Var(within=Reals,bounds=(0,1),initialize=0) m.x236 = Var(within=Reals,bounds=(0,1),initialize=0) m.x237 = Var(within=Reals,bounds=(0,1),initialize=0) m.x238 = Var(within=Reals,bounds=(0,1),initialize=0) m.x239 = Var(within=Reals,bounds=(0,1),initialize=0) m.x240 = Var(within=Reals,bounds=(0,1),initialize=0) m.x241 = Var(within=Reals,bounds=(0,1),initialize=0) m.x242 = Var(within=Reals,bounds=(0,1),initialize=0) m.x243 = Var(within=Reals,bounds=(0,1),initialize=0) m.x244 = Var(within=Reals,bounds=(0,1),initialize=0) m.x245 = Var(within=Reals,bounds=(0,1),initialize=0) m.x246 = Var(within=Reals,bounds=(0,1),initialize=0) m.x247 = Var(within=Reals,bounds=(0,1),initialize=0) m.x248 = Var(within=Reals,bounds=(0,1),initialize=0) m.x249 = Var(within=Reals,bounds=(0,1),initialize=0) m.x250 = Var(within=Reals,bounds=(0,1),initialize=0) m.x251 = Var(within=Reals,bounds=(0,1),initialize=0) m.x252 = Var(within=Reals,bounds=(0,1),initialize=0) m.x253 = Var(within=Reals,bounds=(0,1),initialize=0) m.x254 = Var(within=Reals,bounds=(0,1),initialize=0) m.x255 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x256 = Var(within=Reals,bounds=(0,1),initialize=0) m.x257 = Var(within=Reals,bounds=(0,1),initialize=0) m.x258 = Var(within=Reals,bounds=(0,1),initialize=0) m.x259 = Var(within=Reals,bounds=(0,1),initialize=0) m.x260 = Var(within=Reals,bounds=(0,1),initialize=0) m.x261 = Var(within=Reals,bounds=(0,1),initialize=0) m.x262 = Var(within=Reals,bounds=(0,1),initialize=0) m.x263 = Var(within=Reals,bounds=(0,1),initialize=0) m.x264 = Var(within=Reals,bounds=(0,1),initialize=0) m.x265 = Var(within=Reals,bounds=(0,1),initialize=0) m.x266 = Var(within=Reals,bounds=(0,1),initialize=0) m.x267 = Var(within=Reals,bounds=(0,1),initialize=0) m.x268 = Var(within=Reals,bounds=(0,1),initialize=0) m.x269 = Var(within=Reals,bounds=(0,1),initialize=0) m.x270 = Var(within=Reals,bounds=(0,1),initialize=0) m.x271 = Var(within=Reals,bounds=(0,1),initialize=0) m.x272 = Var(within=Reals,bounds=(0,1),initialize=0) m.x273 = Var(within=Reals,bounds=(0,1),initialize=0) m.x274 = Var(within=Reals,bounds=(0,1),initialize=0) m.x275 = Var(within=Reals,bounds=(0,1),initialize=0) m.x276 = Var(within=Reals,bounds=(0,1),initialize=0) m.x277 = Var(within=Reals,bounds=(0,1),initialize=0) m.x278 = Var(within=Reals,bounds=(0,1),initialize=0) m.x279 = Var(within=Reals,bounds=(0,1),initialize=0) m.x280 = Var(within=Reals,bounds=(0,1),initialize=0) m.x281 = Var(within=Reals,bounds=(0,1),initialize=0) m.x282 = Var(within=Reals,bounds=(0,1),initialize=0) m.x283 = Var(within=Reals,bounds=(0,1),initialize=0) m.x284 = Var(within=Reals,bounds=(0,1),initialize=0) m.x285 = Var(within=Reals,bounds=(0,1),initialize=0) m.x286 = Var(within=Reals,bounds=(0,1),initialize=0) m.x287 = Var(within=Reals,bounds=(0,1),initialize=0) m.x288 = Var(within=Reals,bounds=(0,1),initialize=0) m.x289 = Var(within=Reals,bounds=(0,1),initialize=0) m.x290 = Var(within=Reals,bounds=(0,1),initialize=0) m.x291 = Var(within=Reals,bounds=(0,1),initialize=0) m.x292 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x293 = Var(within=Reals,bounds=(0,1),initialize=0) m.x294 = Var(within=Reals,bounds=(0,1),initialize=0) m.x295 = Var(within=Reals,bounds=(0,1),initialize=0) m.x296 = Var(within=Reals,bounds=(0,1),initialize=0) m.x297 = Var(within=Reals,bounds=(0,1),initialize=0) m.x298 = Var(within=Reals,bounds=(0,1),initialize=0) m.x299 = Var(within=Reals,bounds=(0,1),initialize=0) m.x300 = Var(within=Reals,bounds=(0,1),initialize=0) m.x301 = Var(within=Reals,bounds=(0,1),initialize=0) m.x302 = Var(within=Reals,bounds=(0,1),initialize=0) m.x303 = Var(within=Reals,bounds=(0,1),initialize=0) m.x304 = Var(within=Reals,bounds=(0,1),initialize=0) m.x305 = Var(within=Reals,bounds=(0,1),initialize=0) m.x306 = Var(within=Reals,bounds=(0,1),initialize=0) m.x307 = Var(within=Reals,bounds=(0,1),initialize=0) m.x308 = Var(within=Reals,bounds=(0,1),initialize=0) m.x309 = Var(within=Reals,bounds=(0,1),initialize=0) m.x310 = Var(within=Reals,bounds=(0,1),initialize=0) m.x311 = Var(within=Reals,bounds=(0,1),initialize=0) m.x312 = Var(within=Reals,bounds=(0,1),initialize=0) m.x313 = Var(within=Reals,bounds=(0,1),initialize=0) m.x314 = Var(within=Reals,bounds=(0,1),initialize=0) m.x315 = Var(within=Reals,bounds=(0,1),initialize=0) m.x316 = Var(within=Reals,bounds=(0,1),initialize=0) m.x317 = Var(within=Reals,bounds=(0,1),initialize=0) m.x318 = Var(within=Reals,bounds=(0,1),initialize=0) m.x319 = Var(within=Reals,bounds=(0,1),initialize=0) m.x320 = Var(within=Reals,bounds=(0,1),initialize=0) m.x321 = Var(within=Reals,bounds=(0,1),initialize=0) m.x322 = Var(within=Reals,bounds=(0,1),initialize=0) m.x323 = Var(within=Reals,bounds=(0,1),initialize=0) m.x324 = Var(within=Reals,bounds=(0,1),initialize=0) m.x325 = Var(within=Reals,bounds=(0,1),initialize=0) m.x326 = Var(within=Reals,bounds=(0,1),initialize=0) m.x327 = Var(within=Reals,bounds=(0,1),initialize=0) m.x328 = Var(within=Reals,bounds=(0,1),initialize=0) m.x329 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x330 = Var(within=Reals,bounds=(0,1),initialize=0) m.x331 = Var(within=Reals,bounds=(0,1),initialize=0) m.x332 = Var(within=Reals,bounds=(0,1),initialize=0) m.x333 = Var(within=Reals,bounds=(0,1),initialize=0) m.x334 = Var(within=Reals,bounds=(0,1),initialize=0) m.x335 = Var(within=Reals,bounds=(0,1),initialize=0) m.x336 = Var(within=Reals,bounds=(0,1),initialize=0) m.x337 = Var(within=Reals,bounds=(0,1),initialize=0) m.x338 = Var(within=Reals,bounds=(0,1),initialize=0) m.x339 = Var(within=Reals,bounds=(0,1),initialize=0) m.x340 = Var(within=Reals,bounds=(0,1),initialize=0) m.x341 = Var(within=Reals,bounds=(0,1),initialize=0) m.x342 = Var(within=Reals,bounds=(0,1),initialize=0) m.x343 = Var(within=Reals,bounds=(0,1),initialize=0) m.x344 = Var(within=Reals,bounds=(0,1),initialize=0) m.x345 = Var(within=Reals,bounds=(0,1),initialize=0) m.x346 = Var(within=Reals,bounds=(0,1),initialize=0) m.x347 = Var(within=Reals,bounds=(0,1),initialize=0) m.x348 = Var(within=Reals,bounds=(0,1),initialize=0) m.x349 = Var(within=Reals,bounds=(0,1),initialize=0) m.x350 = Var(within=Reals,bounds=(0,1),initialize=0) m.x351 = Var(within=Reals,bounds=(0,1),initialize=0) m.x352 = Var(within=Reals,bounds=(0,1),initialize=0) m.x353 = Var(within=Reals,bounds=(0,1),initialize=0) m.x354 = Var(within=Reals,bounds=(0,1),initialize=0) m.x355 = Var(within=Reals,bounds=(0,1),initialize=0) m.x356 = Var(within=Reals,bounds=(0,1),initialize=0) m.x357 = Var(within=Reals,bounds=(0,1),initialize=0) m.x358 = Var(within=Reals,bounds=(0,1),initialize=0) m.x359 = Var(within=Reals,bounds=(0,1),initialize=0) m.x360 = Var(within=Reals,bounds=(0,1),initialize=0) m.x361 = Var(within=Reals,bounds=(0,1),initialize=0) m.x362 = Var(within=Reals,bounds=(0,1),initialize=0) m.x363 = Var(within=Reals,bounds=(0,1),initialize=0) m.x364 = Var(within=Reals,bounds=(0,1),initialize=0) m.x365 = Var(within=Reals,bounds=(0,1),initialize=0) m.x366 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x367 = Var(within=Reals,bounds=(0,1),initialize=0) m.x368 = Var(within=Reals,bounds=(0,1),initialize=0) m.x369 = Var(within=Reals,bounds=(0,1),initialize=0) m.x370 = Var(within=Reals,bounds=(0,1),initialize=0) m.x371 = Var(within=Reals,bounds=(0,1),initialize=0) m.x372 = Var(within=Reals,bounds=(0,1),initialize=0) m.x373 = Var(within=Reals,bounds=(0,1),initialize=0) m.x374 = Var(within=Reals,bounds=(0,1),initialize=0) m.x375 = Var(within=Reals,bounds=(0,1),initialize=0) m.x376 = Var(within=Reals,bounds=(0,1),initialize=0) m.x377 = Var(within=Reals,bounds=(0,1),initialize=0) m.x378 = Var(within=Reals,bounds=(0,1),initialize=0) m.x379 = Var(within=Reals,bounds=(0,1),initialize=0) m.x380 = Var(within=Reals,bounds=(0,1),initialize=0) m.x381 = Var(within=Reals,bounds=(0,1),initialize=0) m.x382 = Var(within=Reals,bounds=(0,1),initialize=0) m.x383 = Var(within=Reals,bounds=(0,1),initialize=0) m.x384 = Var(within=Reals,bounds=(0,1),initialize=0) m.x385 = Var(within=Reals,bounds=(0,1),initialize=0) m.x386 = Var(within=Reals,bounds=(0,1),initialize=0) m.x387 = Var(within=Reals,bounds=(0,1),initialize=0) m.x388 = Var(within=Reals,bounds=(0,1),initialize=0) m.x389 = Var(within=Reals,bounds=(0,1),initialize=0) m.x390 = Var(within=Reals,bounds=(0,1),initialize=0) m.x391 = Var(within=Reals,bounds=(0,1),initialize=0) m.x392 = Var(within=Reals,bounds=(0,1),initialize=0) m.x393 = Var(within=Reals,bounds=(0,1),initialize=0) m.x394 = Var(within=Reals,bounds=(0,1),initialize=0) m.x395 = Var(within=Reals,bounds=(0,1),initialize=0) m.x396 = Var(within=Reals,bounds=(0,1),initialize=0) m.x397 = Var(within=Reals,bounds=(0,1),initialize=0) m.x398 = Var(within=Reals,bounds=(0,1),initialize=0) m.x399 = Var(within=Reals,bounds=(0,1),initialize=0) m.x400 = Var(within=Reals,bounds=(0,1),initialize=0) m.x401 = Var(within=Reals,bounds=(0,1),initialize=0) m.x402 = Var(within=Reals,bounds=(0,1),initialize=0) m.x403 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x404 = Var(within=Reals,bounds=(0,1),initialize=0) m.x405 = Var(within=Reals,bounds=(0,1),initialize=0) m.x406 = Var(within=Reals,bounds=(0,1),initialize=0) m.x407 = Var(within=Reals,bounds=(0,1),initialize=0) m.x408 = Var(within=Reals,bounds=(0,1),initialize=0) m.x409 = Var(within=Reals,bounds=(0,1),initialize=0) m.x410 = Var(within=Reals,bounds=(0,1),initialize=0) m.x411 = Var(within=Reals,bounds=(0,1),initialize=0) m.x412 = Var(within=Reals,bounds=(0,1),initialize=0) m.x413 = Var(within=Reals,bounds=(0,1),initialize=0) m.x414 = Var(within=Reals,bounds=(0,1),initialize=0) m.x415 = Var(within=Reals,bounds=(0,1),initialize=0) m.x416 = Var(within=Reals,bounds=(0,1),initialize=0) m.x417 = Var(within=Reals,bounds=(0,1),initialize=0) m.x418 = Var(within=Reals,bounds=(0,1),initialize=0) m.x419 = Var(within=Reals,bounds=(0,1),initialize=0) m.x420 = Var(within=Reals,bounds=(0,1),initialize=0) m.x421 = Var(within=Reals,bounds=(0,1),initialize=0) m.x422 = Var(within=Reals,bounds=(0,1),initialize=0) m.x423 = Var(within=Reals,bounds=(0,1),initialize=0) m.x424 = Var(within=Reals,bounds=(0,1),initialize=0) m.x425 = Var(within=Reals,bounds=(0,1),initialize=0) m.x426 = Var(within=Reals,bounds=(0,1),initialize=0) m.x427 = Var(within=Reals,bounds=(0,1),initialize=0) m.x428 = Var(within=Reals,bounds=(0,1),initialize=0) m.x429 = Var(within=Reals,bounds=(0,1),initialize=0) m.x430 = Var(within=Reals,bounds=(0,1),initialize=0) m.x431 = Var(within=Reals,bounds=(0,1),initialize=0) m.x432 = Var(within=Reals,bounds=(0,1),initialize=0) m.x433 = Var(within=Reals,bounds=(0,1),initialize=0) m.x434 = Var(within=Reals,bounds=(0,1),initialize=0) m.x435 = Var(within=Reals,bounds=(0,1),initialize=0) m.x436 = Var(within=Reals,bounds=(0,1),initialize=0) m.x437 = Var(within=Reals,bounds=(0,1),initialize=0) m.x438 = Var(within=Reals,bounds=(0,1),initialize=0) m.x439 = Var(within=Reals,bounds=(0,1),initialize=0) m.x440 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x441 = Var(within=Reals,bounds=(0,1),initialize=0) m.x442 = Var(within=Reals,bounds=(0,1),initialize=0) m.x443 = Var(within=Reals,bounds=(0,1),initialize=0) m.x444 = Var(within=Reals,bounds=(0,1),initialize=0) m.x445 = Var(within=Reals,bounds=(0,1),initialize=0) m.x446 = Var(within=Reals,bounds=(0,1),initialize=0) m.x447 = Var(within=Reals,bounds=(0,1),initialize=0) m.x448 = Var(within=Reals,bounds=(0,1),initialize=0) m.x449 = Var(within=Reals,bounds=(0,1),initialize=0) m.x450 = Var(within=Reals,bounds=(0,1),initialize=0) m.x451 = Var(within=Reals,bounds=(0,1),initialize=0) m.x452 = Var(within=Reals,bounds=(0,1),initialize=0) m.x453 = Var(within=Reals,bounds=(0,1),initialize=0) m.x454 = Var(within=Reals,bounds=(0,1),initialize=0) m.x455 = Var(within=Reals,bounds=(0,1),initialize=0) m.x456 = Var(within=Reals,bounds=(0,1),initialize=0) m.x457 = Var(within=Reals,bounds=(0,1),initialize=0) m.x458 = Var(within=Reals,bounds=(0,1),initialize=0) m.x459 = Var(within=Reals,bounds=(0,1),initialize=0) m.x460 = Var(within=Reals,bounds=(0,1),initialize=0) m.x461 = Var(within=Reals,bounds=(0,1),initialize=0) m.x462 = Var(within=Reals,bounds=(0,1),initialize=0) m.x463 = Var(within=Reals,bounds=(0,1),initialize=0) m.x464 = Var(within=Reals,bounds=(0,1),initialize=0) m.x465 = Var(within=Reals,bounds=(0,1),initialize=0) m.x466 = Var(within=Reals,bounds=(0,1),initialize=0) m.x467 = Var(within=Reals,bounds=(0,1),initialize=0) m.x468 = Var(within=Reals,bounds=(0,1),initialize=0) m.x469 = Var(within=Reals,bounds=(0,1),initialize=0) m.x470 = Var(within=Reals,bounds=(0,1),initialize=0) m.x471 = Var(within=Reals,bounds=(0,1),initialize=0) m.x472 = Var(within=Reals,bounds=(0,1),initialize=0) m.x473 = Var(within=Reals,bounds=(0,1),initialize=0) m.x474 = Var(within=Reals,bounds=(0,1),initialize=0) m.x475 = Var(within=Reals,bounds=(0,1),initialize=0) m.x476 = Var(within=Reals,bounds=(0,1),initialize=0) m.x477 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x478 = Var(within=Reals,bounds=(0,1),initialize=0) m.x479 = Var(within=Reals,bounds=(0,1),initialize=0) m.x480 = Var(within=Reals,bounds=(0,1),initialize=0) m.x481 = Var(within=Reals,bounds=(0,1),initialize=0) m.x482 = Var(within=Reals,bounds=(0,1),initialize=0) m.x483 = Var(within=Reals,bounds=(0,1),initialize=0) m.x484 = Var(within=Reals,bounds=(0,1),initialize=0) m.x485 = Var(within=Reals,bounds=(0,1),initialize=0) m.x486 = Var(within=Reals,bounds=(0,1),initialize=0) m.x487 = Var(within=Reals,bounds=(0,1),initialize=0) m.x488 = Var(within=Reals,bounds=(0,1),initialize=0) m.x489 = Var(within=Reals,bounds=(0,1),initialize=0) m.x490 = Var(within=Reals,bounds=(0,1),initialize=0) m.x491 = Var(within=Reals,bounds=(0,1),initialize=0) m.x492 = Var(within=Reals,bounds=(0,1),initialize=0) m.x493 = Var(within=Reals,bounds=(0,1),initialize=0) m.x494 = Var(within=Reals,bounds=(0,1),initialize=0) m.x495 = Var(within=Reals,bounds=(0,1),initialize=0) m.x496 = Var(within=Reals,bounds=(0,1),initialize=0) m.x497 = Var(within=Reals,bounds=(0,1),initialize=0) m.x498 = Var(within=Reals,bounds=(0,1),initialize=0) m.x499 = Var(within=Reals,bounds=(0,1),initialize=0) m.x500 = Var(within=Reals,bounds=(0,1),initialize=0) m.x501 = Var(within=Reals,bounds=(0,1),initialize=0) m.x502 = Var(within=Reals,bounds=(0,1),initialize=0) m.x503 = Var(within=Reals,bounds=(0,1),initialize=0) m.x504 = Var(within=Reals,bounds=(0,1),initialize=0) m.x505 = Var(within=Reals,bounds=(0,1),initialize=0) m.x506 = Var(within=Reals,bounds=(0,1),initialize=0) m.x507 = Var(within=Reals,bounds=(0,1),initialize=0) m.x508 = Var(within=Reals,bounds=(0,1),initialize=0) m.x509 = Var(within=Reals,bounds=(0,1),initialize=0) m.x510 = Var(within=Reals,bounds=(0,1),initialize=0) m.x511 = Var(within=Reals,bounds=(0,1),initialize=0) m.x512 = Var(within=Reals,bounds=(0,1),initialize=0) m.x513 = Var(within=Reals,bounds=(0,1),initialize=0) m.x514 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x515 = Var(within=Reals,bounds=(0,1),initialize=0) m.x516 = Var(within=Reals,bounds=(0,1),initialize=0) m.x517 = Var(within=Reals,bounds=(0,1),initialize=0) m.x518 = Var(within=Reals,bounds=(0,1),initialize=0) m.x519 = Var(within=Reals,bounds=(0,1),initialize=0) m.x520 = Var(within=Reals,bounds=(0,1),initialize=0) m.x521 = Var(within=Reals,bounds=(0,1),initialize=0) m.x522 = Var(within=Reals,bounds=(0,1),initialize=0) m.x523 = Var(within=Reals,bounds=(0,1),initialize=0) m.x524 = Var(within=Reals,bounds=(0,1),initialize=0) m.x525 = Var(within=Reals,bounds=(0,1),initialize=0) m.x526 = Var(within=Reals,bounds=(0,1),initialize=0) m.x527 = Var(within=Reals,bounds=(0,1),initialize=0) m.x528 = Var(within=Reals,bounds=(0,1),initialize=0) m.x529 = Var(within=Reals,bounds=(0,1),initialize=0) m.x530 = Var(within=Reals,bounds=(0,1),initialize=0) m.x531 = Var(within=Reals,bounds=(0,1),initialize=0) m.x532 = Var(within=Reals,bounds=(0,1),initialize=0) m.x533 = Var(within=Reals,bounds=(0,1),initialize=0) m.x534 = Var(within=Reals,bounds=(0,1),initialize=0) m.x535 = Var(within=Reals,bounds=(0,1),initialize=0) m.x536 = Var(within=Reals,bounds=(0,1),initialize=0) m.x537 = Var(within=Reals,bounds=(0,1),initialize=0) m.x538 = Var(within=Reals,bounds=(0,1),initialize=0) m.x539 = Var(within=Reals,bounds=(0,1),initialize=0) m.x540 = Var(within=Reals,bounds=(0,1),initialize=0) m.x541 = Var(within=Reals,bounds=(0,1),initialize=0) m.x542 = Var(within=Reals,bounds=(0,1),initialize=0) m.x543 = Var(within=Reals,bounds=(0,1),initialize=0) m.x544 = Var(within=Reals,bounds=(0,1),initialize=0) m.x545 = Var(within=Reals,bounds=(0,1),initialize=0) m.x546 = Var(within=Reals,bounds=(0,1),initialize=0) m.x547 = Var(within=Reals,bounds=(0,1),initialize=0) m.x548 = Var(within=Reals,bounds=(0,1),initialize=0) m.x549 = Var(within=Reals,bounds=(0,1),initialize=0) m.x550 = Var(within=Reals,bounds=(0,1),initialize=0) m.x551 = 
Var(within=Reals,bounds=(0,1),initialize=0) m.x552 = Var(within=Reals,bounds=(0,1),initialize=0) m.x553 = Var(within=Reals,bounds=(0,1),initialize=0) m.x554 = Var(within=Reals,bounds=(0,1),initialize=0) m.x555 = Var(within=Reals,bounds=(0,1),initialize=0) m.x556 = Var(within=Reals,bounds=(0,1),initialize=0) m.x557 = Var(within=Reals,bounds=(0,1),initialize=0) m.x558 = Var(within=Reals,bounds=(0,1),initialize=0) m.x559 = Var(within=Reals,bounds=(0,1),initialize=0) m.x560 = Var(within=Reals,bounds=(0,1),initialize=0) m.x561 = Var(within=Reals,bounds=(0,1),initialize=0) m.x562 = Var(within=Reals,bounds=(0,1),initialize=0) m.x563 = Var(within=Reals,bounds=(0,1),initialize=0) m.x564 = Var(within=Reals,bounds=(0,1),initialize=0) m.x565 = Var(within=Reals,bounds=(0,1),initialize=0) m.x566 = Var(within=Reals,bounds=(0,1),initialize=0) m.x567 = Var(within=Reals,bounds=(0,1),initialize=0) m.x568 = Var(within=Reals,bounds=(0,1),initialize=0) m.x569 = Var(within=Reals,bounds=(0,1),initialize=0) m.x570 = Var(within=Reals,bounds=(0,1),initialize=0) m.x571 = Var(within=Reals,bounds=(0,1),initialize=0) m.x572 = Var(within=Reals,bounds=(0,1),initialize=0) m.x573 = Var(within=Reals,bounds=(0,1),initialize=0) m.x574 = Var(within=Reals,bounds=(0,1),initialize=0) m.x575 = Var(within=Reals,bounds=(0,1),initialize=0) m.x576 = Var(within=Reals,bounds=(0,1),initialize=0) m.x577 = Var(within=Reals,bounds=(0,1),initialize=0) m.x578 = Var(within=Reals,bounds=(0,16),initialize=0) m.x579 = Var(within=Reals,bounds=(0,16),initialize=0) m.x580 = Var(within=Reals,bounds=(0,16),initialize=0) m.x581 = Var(within=Reals,bounds=(0,16),initialize=0) m.x582 = Var(within=Reals,bounds=(0,16),initialize=0) m.x583 = Var(within=Reals,bounds=(0,16),initialize=0) m.x584 = Var(within=Reals,bounds=(0,16),initialize=0) m.x585 = Var(within=Reals,bounds=(0,16),initialize=0) m.x586 = Var(within=Reals,bounds=(0,16),initialize=0) m.x587 = Var(within=Reals,bounds=(0,16),initialize=0) m.x588 = 
Var(within=Reals,bounds=(0,16),initialize=0) m.x589 = Var(within=Reals,bounds=(0,16),initialize=0) m.x590 = Var(within=Reals,bounds=(0,16),initialize=0) m.x591 = Var(within=Reals,bounds=(0,16),initialize=0) m.x592 = Var(within=Reals,bounds=(0,16),initialize=0) m.x593 = Var(within=Reals,bounds=(0,16),initialize=0) m.x594 = Var(within=Reals,bounds=(0,16),initialize=0) m.x595 = Var(within=Reals,bounds=(0,16),initialize=0) m.x596 = Var(within=Reals,bounds=(0,16),initialize=0) m.x597
target is not already defined: we should try to infer the type if self.type_inference is True: # Perform type inference # Build dictionary with symbols def_symbols = {} def_symbols.update(self.locals.get_name_type_associations()) def_symbols.update(self.defined_symbols) inferred_symbols = type_inference.infer_types( t, def_symbols) inferred_type = inferred_symbols[target.id] self.locals.define(target.id, t.lineno, self._indent, inferred_type) self.write(dace.dtypes._CTYPES[inferred_type.type] + " ") else: self.locals.define(target.id, t.lineno, self._indent) self.write("auto ") # dispatch target self.dispatch(target) #if not infer_type: # inferred_type = self.dispatch(target, True) #self.dtype = inferred_type self.write(" = ") self.dispatch(t.value) #self.dtype = inferred_type self.write(';') def _AugAssign(self, t): self.fill() self.dispatch(t.target) # Operations that require a function call if t.op.__class__.__name__ in self.funcops: separator, func = self.funcops[t.op.__class__.__name__] self.write(" = " + func + "(") self.dispatch(t.target) self.write(separator + " ") self.dispatch(t.value) self.write(")") else: self.write(" " + self.binop[t.op.__class__.__name__] + "= ") self.dispatch(t.value) self.write(';') def _AnnAssign(self, t): self.fill() if isinstance(t.target, ast.Tuple): if len(t.target.elts) > 1: self.dispatch_lhs_tuple(t.target.elts) else: target = t.target.elts[0] else: target = t.target # Assignment of the form x: int = 0 is converted to int x = (int)0; if not self.locals.is_defined(target.id, self._indent): if self.type_inference is True: # get the type indicated into the annotation def_symbols = self.defined_symbols.copy() def_symbols.update(self.locals.get_name_type_associations()) inferred_symbols = type_inference.infer_types(t, def_symbols) inferred_type = inferred_symbols[target.id] self.locals.define(target.id, t.lineno, self._indent, inferred_type) else: self.locals.define(target.id, t.lineno, self._indent) self.dispatch(t.annotation) 
self.write(' ') if not t.simple: self.write("(") self.dispatch(t.target) if not t.simple: self.write(")") if t.value: self.write(" = (") self.dispatch(t.annotation) self.write(")") self.dispatch(t.value) self.write(';') def _Return(self, t): self.fill("return") if t.value: self.write(" ") self.dispatch(t.value) self.write(';') def _Pass(self, t): self.fill(";") def _Break(self, t): self.fill("break;") def _Continue(self, t): self.fill("continue;") def _Delete(self, t): raise NotImplementedError('Invalid C++') def _Assert(self, t): self.fill("assert(") self.dispatch(t.test) if t.msg: self.write(", ") self.dispatch(t.msg) self.write(");") def _Exec(self, t): raise NotImplementedError('Invalid C++') def _Print(self, t): do_comma = False if t.dest: self.fill("fprintf(") self.dispatch(t.dest) do_comma = True else: self.fill("printf(") for e in t.values: if do_comma: self.write(", ") else: do_comma = True self.dispatch(e) if not t.nl: self.write(",") self.write(');') def _Global(self, t): raise NotImplementedError('Invalid C++') def _Nonlocal(self, t): raise NotImplementedError('Invalid C++') def _Yield(self, t): raise NotImplementedError('Invalid C++') def _YieldFrom(self, t): raise NotImplementedError('Invalid C++') def _Raise(self, t): self.fill("throw") if not t.exc: assert not t.cause return self.write(" ") self.dispatch(t.exc) if t.cause: raise NotImplementedError('Invalid C++') self.write(';') def _Try(self, t): self.fill("try") self.enter() self.dispatch(t.body) self.leave() for ex in t.handlers: self.dispatch(ex) if t.orelse: raise NotImplementedError('Invalid C++') if t.finalbody: self.fill("finally") self.enter() self.dispatch(t.finalbody) self.leave() def _TryExcept(self, t): self.fill("try") self.enter() self.dispatch(t.body) self.leave() for ex in t.handlers: self.dispatch(ex) if t.orelse: raise NotImplementedError('Invalid C++') def _TryFinally(self, t): if len(t.body) == 1 and isinstance(t.body[0], ast.TryExcept): # try-except-finally 
self.dispatch(t.body) else: self.fill("try") self.enter() self.dispatch(t.body) self.leave() self.fill("finally") self.enter() self.dispatch(t.finalbody) self.leave() def _ExceptHandler(self, t): self.fill("catch (") if t.type: self.dispatch(t.type) if t.name: self.write(t.name) self.write(')') self.enter() self.dispatch(t.body) self.leave() def _write_constant(self, value): result = repr(value) if isinstance(value, (float, complex)): # Substitute overflowing decimal literal for AST infinities. self.write(result.replace("inf", INFSTR)) else: self.write(result.replace('\'', '\"')) def _Constant(self, t): value = t.value if value is True or value is False or value is None: self.write(_py2c_nameconst[value]) else: if isinstance(value, tuple): self.write("(") if len(value) == 1: self._write_constant(value[0]) self.write(",") else: interleave(lambda: self.write(", "), self._write_constant, value) self.write(")") elif value is Ellipsis: # instead of `...` for Py2 compatibility self.write("...") else: self._write_constant(t.value) def _ClassDef(self, t): raise NotImplementedError('Classes are unsupported') def _generic_FunctionDef(self, t, is_async=False): self.write("\n") for deco in t.decorator_list: self.fill("// Decorator: ") self.dispatch(deco) if is_async: self.write('/* async */ ') if getattr(t, "returns", False): if isinstance(t.returns, ast.NameConstant): if t.returns.value is None: self.write('void') else: self.dispatch(t.returns) else: self.dispatch(t.returns) self.fill(" " + t.name + "(") else: self.fill("auto " + t.name + "(") self.dispatch(t.args) self.write(")") self.enter() self.dispatch(t.body) self.leave() def _FunctionDef(self, t): self._generic_FunctionDef(t) def _AsyncFunctionDef(self, t): self._generic_FunctionDef(t, is_async=True) def _generic_For(self, t, is_async=False): if is_async: self.fill("/* async */ for (") else: self.fill("for (") if isinstance(t.target, ast.Tuple): self.write("auto ") if len(t.target.elts) == 1: (elt, ) = t.target.elts 
self.locals.define(elt.id, t.lineno, self._indent + 1) self.dispatch(elt) else: self.write("[") interleave(lambda: self.write(", "), self.dispatch, t.target.elts) for elt in t.target.elts: self.locals.define(elt.id, t.lineno, self._indent + 1) self.write("]") else: if not self.locals.is_defined(t.target.id, self._indent): self.locals.define(t.target.id, t.lineno, self._indent + 1) self.write('auto ') self.dispatch(t.target) self.write(" : ") self.dispatch(t.iter) self.write(")") self.enter() self.dispatch(t.body) self.leave() if t.orelse: raise NotImplementedError('Invalid C++') def _For(self, t): self._generic_For(t) def _AsyncFor(self, t): self._generic_For(t, is_async=True) def _If(self, t): self.fill("if (") self.dispatch(t.test) self.write(')') self.enter() self.dispatch(t.body) self.leave() # collapse nested ifs into equivalent elifs. while (t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If)): t = t.orelse[0] self.fill("else if (") self.dispatch(t.test) self.write(')') self.enter() self.dispatch(t.body) self.leave() # final else if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _While(self, t): self.fill("while (") self.dispatch(t.test) self.write(')') self.enter() self.dispatch(t.body) self.leave() if t.orelse: raise NotImplementedError('Invalid C++') def _generic_With(self, t, is_async=False): raise NotImplementedError('Invalid C++') def _With(self, t): self._generic_With(t) def _AsyncWith(self, t): self._generic_With(t, is_async=True) # expr def _Bytes(self, t): self._write_constant(t.s) def _Str(self, tree): result = tree.s self._write_constant(result) format_conversions = {97: 'a', 114: 'r', 115: 's'} def _FormattedValue(self, t): # FormattedValue(expr value, int? conversion, expr? 
format_spec) self.write("{") self.dispatch(t.value) if t.conversion is not None and t.conversion != -1: self.write("!") self.write(self.format_conversions[t.conversion]) # raise NotImplementedError(ast.dump(t, True, True)) if t.format_spec is not None: self.write(":") if isinstance(t.format_spec, ast.Str): self.write(t.format_spec.s) else: self.dispatch(t.format_spec) self.write("}") def _JoinedStr(self, t): # JoinedStr(expr* values) self.write("f'''") for value in t.values: if isinstance(value, ast.Str): self.write(value.s) else: self.dispatch(value) self.write("'''") def _Name(self, t): if t.id in _py2c_reserved: self.write(_py2c_reserved[t.id]) else: self.write(t.id) def _NameConstant(self, t): self.write(_py2c_nameconst[t.value]) def _Repr(self, t): raise NotImplementedError('Invalid C++') def _Num(self, t): repr_n = repr(t.n) # For complex values, use type of assignment (if exists), or # double-complex (128-bit) otherwise dtype = self.dtype or 'dace::complex128' if repr_n.endswith("j"): self.write("%s(0, %s)" % (dtype, repr_n.replace("inf", INFSTR)[:-1])) else: self.write(repr_n.replace("inf", INFSTR)) def _List(self, t): raise NotImplementedError('Invalid C++') # self.write("[") # interleave(lambda: self.write(", "), self.dispatch, t.elts) # self.write("]") def _ListComp(self): raise NotImplementedError('Invalid C++') # self.write("[") # self.dispatch(t.elt) # for gen in t.generators: # self.dispatch(gen) # self.write("]") def _GeneratorExp(self, t): raise NotImplementedError('Invalid C++') # self.write("(") # self.dispatch(t.elt) # for gen in t.generators: # self.dispatch(gen) # self.write(")") def _SetComp(self, t): raise NotImplementedError('Invalid C++') # self.write("{") # self.dispatch(t.elt) # for gen in t.generators: # self.dispatch(gen) # self.write("}") def _DictComp(self, t): raise NotImplementedError('Invalid C++') # self.write("{") # self.dispatch(t.key) # self.write(": ") # self.dispatch(t.value) # for gen in t.generators: # self.dispatch(gen) # 
self.write("}") def _comprehension(self, t): raise NotImplementedError('Invalid C++') # if getattr(t, 'is_async', False): # self.write(" async") # self.write(" for ") # self.dispatch(t.target) # self.write(" in ") # self.dispatch(t.iter) # for if_clause in t.ifs: # self.write(" if ") # self.dispatch(if_clause) def _IfExp(self, t): self.write("(") self.dispatch(t.test) self.write(" ? ") type_body = self.dispatch(t.body) self.write(" : ") type_orelse = self.dispatch(t.orelse) self.write(")") def _Set(self, t): raise NotImplementedError('Invalid C++') # assert(t.elts) # should be at least one element # self.write("{") # interleave(lambda: self.write(", "), self.dispatch, t.elts) # self.write("}") def _Dict(self, t): raise NotImplementedError('Invalid C++') # self.write("{") # def write_pair(pair): # (k, v) = pair # self.dispatch(k) # self.write(": ") # self.dispatch(v) # interleave(lambda: self.write(", "), write_pair, zip(t.keys, t.values)) # self.write("}") def _Tuple( self, t, ): self.write("std::make_tuple(") if len(t.elts) == 1: (elt, ) = t.elts self.dispatch(elt) self.write(",") else: interleave(lambda: self.write(", "), self.dispatch, t.elts) self.write(")") unop = {"Invert": "~", "Not": "!", "UAdd": "+", "USub": "-"} def _UnaryOp(self, t): self.write("(") self.write(self.unop[t.op.__class__.__name__]) self.write(" ") self.dispatch(t.operand) self.write(")") binop = { "Add": "+", "Sub": "-", "Mult": "*", "Div": "/", "Mod": "%", "LShift": "<<", "RShift": ">>", "BitOr": "|", "BitXor": "^", "BitAnd": "&" } funcops = { "FloorDiv": (" /", "dace::math::ifloor"), "MatMult": (",", "dace::gemm") } def _BinOp(self, t): # Operations that require a function call if t.op.__class__.__name__ in self.funcops: separator, func = self.funcops[t.op.__class__.__name__] self.write(func + "(") # get the type of left and right operands for type inference self.dispatch(t.left) self.write(separator + " ") self.dispatch(t.right) self.write(")") # Special case for integer power elif 
t.op.__class__.__name__ == 'Pow': if (isinstance(t.right, (ast.Num, ast.Constant)) and int(t.right.n) == t.right.n and t.right.n >= 0): self.write("(") if t.right.n == 0: self.write("1") else: self.dispatch(t.left) for i in range(int(t.right.n) - 1): self.write(" * ") self.dispatch(t.left) self.write(")") else: self.write("dace::math::pow(") self.dispatch(t.left) self.write(", ") self.dispatch(t.right) self.write(")") else: self.write("(") # get left and right types for type inference self.dispatch(t.left) self.write(" " + self.binop[t.op.__class__.__name__] + " ") self.dispatch(t.right) self.write(")") cmpops = { "Eq": "==", "NotEq": "!=", "Lt": "<", "LtE": "<=", "Gt": ">", "GtE": ">=", "Is": "==", "IsNot": "!=", # "In":"in", "NotIn":"not in" } def _Compare(self, t): self.write("(") self.dispatch(t.left) for o, e in zip(t.ops,
= query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. 
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, -1e10).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxBertSelfOutput(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class FlaxBertAttention(nn.Module): config: BertConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxBertSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxBertSelfOutput(self.config, 
dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxBertIntermediate(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxBertOutput(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states 
+ attention_output) return hidden_states class FlaxBertLayer(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxBertAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxBertIntermediate(self.config, dtype=self.dtype) self.output = FlaxBertOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxBertAttention(self.config, causal=False, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs class FlaxBertLayerCollection(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxBertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, 
hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for \ {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, layer_head_mask=head_mask[i] if head_mask is not None else None, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class FlaxBertEncoder(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layer = FlaxBertLayerCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: 
Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class FlaxBertPooler(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state) class FlaxBertPredictionHeadTransform(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return self.LayerNorm(hidden_states) class FlaxBertLMPredictionHead(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.transform = FlaxBertPredictionHeadTransform(self.config, dtype=self.dtype) self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.transform(hidden_states) if shared_embedding is 
not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states class FlaxBertOnlyMLMHead(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding) return hidden_states class FlaxBertOnlyNSPHead(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, pooled_output): return self.seq_relationship(pooled_output) class FlaxBertPreTrainingHeads(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype) self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, hidden_states, pooled_output, shared_embedding=None): prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class FlaxBertPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = BertConfig base_model_prefix = "bert" module_class: nn.Module = None def __init__( self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. 
Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. """ # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids, dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask,
str(numberOfElements)+'\n' gamText = self.addSignalParameters( inputDict['value_nid'].getParent().getNode('parameters'), gamText) signalDict['dimensions'] = numberOfDimensions signalDict['elements'] = numberOfElements # endif len(inputDict['fields']) > 0 # endif Normal Reference gamText += ' }\n' inputSignals.append(signalDict) if len(inputDicts) > 0: gamText += ' }\n' # Output Signals outputSignals = [] # For debug printout synchThreadSignals = [] asynchThreadSignals = [] outputsToBeExpanded = [] if len(outputDicts) > 0: gamText += ' OutputSignals = {\n' for outputDict in outputDicts: outputSignalDict = {} outputSignalDict['name'] = outputDict['name'] gamText += ' '+outputDict['name']+' = {\n' gamText += ' DataSource = '+gamName+'_Output_DDB\n' gamText += ' Type = '+outputDict['type']+'\n' outputSignalDict['type'] = outputDict['type'] if outputDict['dimensions'] == 0: numberOfElements = 1 numberOfDimensions = 0 outputSignalDict['dimensions'] = 0 outputSignalDict['elements'] = 1 else: numberOfDimensions = len(outputDict['dimensions']) numberOfElements = 1 for currDim in outputDict['dimensions']: numberOfElements *= currDim gamText += ' NumberOfDimensions = ' + \ str(numberOfDimensions)+'\n' gamText += ' NumberOfElements = '+str(numberOfElements)+'\n' outputSignalDict['dimensions'] = numberOfDimensions outputSignalDict['elements'] = numberOfElements gamText = self.addSignalParameters( outputDict['value_nid'].getParent().getNode('parameters'), gamText) gamText += ' }\n' if self.isUsedOnAnotherThread(threadMap, outputDict['value_nid'], True): synchThreadSignals.append(outputDict['name']) if self.isUsedOnAnotherThread(threadMap, outputDict['value_nid'], False): asynchThreadSignals.append(outputDict['name']) outputSignals.append(outputSignalDict) # --------------------------------------------If this is a structured output if len(outputDict['fields']) > 0: if not self.addType(outputDict['type'], outputDict['fields'], typeDicts): raise Exception( 'Incompatible Type 
definition: '+inputDict['type']) # Check if any expanded field of thid output is used by other components if self.isAnyFieldUsed(threadMap, outputDict): outputsToBeExpanded.append(outputDict) if len(outputDicts) > 0: gamText += ' }\n' gamText += ' }\n' gams.append(gamText) dataSourceText = ' +'+gamName+'_Output_DDB = {\n' dataSourceText += ' Class = GAMDataSource\n' dataSourceText += ' }\n' dataSources.append(dataSourceText) # If any structured output has to be expanded because any of its fields is referenced, create DDB and relative IOGAM if len(outputsToBeExpanded) > 0: gamText = ' +'+gamName+'_Output_Bus_IOGAM = {\n' gamText += ' Class = IOGAM\n' gamText += ' InputSignals = {\n' for outputDict in outputsToBeExpanded: gamText += ' '+outputDict['name']+' = {\n' gamText += ' Type = '+outputDict['type']+'\n' gamText += ' DataSource = '+gamName+'_Output_DDB\n' gamText += ' }\n' gamText += ' }\n' gamText += ' OutputSignals = {\n' for outputDict in outputsToBeExpanded: for fieldDict in outputDict['fields']: gamText += ' ' + \ outputDict['name']+'_'+fieldDict['name'] + ' = {\n' gamText += ' Type = '+fieldDict['type']+'\n' gamText += ' DataSource = '+gamName+'_Expanded_Output_DDB\n' gamText += ' }\n' gamText += ' }\n' gamText += ' }\n' gams.append(gamText) # NOTE: for expanded outputs communication is supported only within the same thread!!!!!!!!!!!! 
gamList.append(gamName+'_Output_Bus_IOGAM') dataSourceText = ' +'+gamName+'_Expanded_Output_DDB = {\n' dataSourceText += ' Class = GAMDataSource\n' dataSourceText += ' }\n' dataSources.append(dataSourceText) # If any output has to be stored We need to declare out DDB, out MdsWriter and relative IOGAM if configDict['storeSignals']: dataSourceText = ' +'+gamName+'_TreeOutput = {\n' dataSourceText += ' Class = MDSWriter\n' if outputTrigger == None: dataSourceText += ' NumberOfBuffers = 20000\n' else: dataSourceText += ' NumberOfBuffers = ' + \ str(configDict['preTrigger'] + configDict['postTrigger']+1)+'\n' dataSourceText += ' NumberOfPreTriggers = ' + \ str(configDict['preTrigger'])+'\n' dataSourceText += ' NumberOfPostTriggers = ' + \ str(configDict['postTrigger'])+'\n' dataSourceText += ' CPUMask = ' + \ str(configDict['cpuMask'])+'\n' dataSourceText += ' StackSize = 10000000\n' dataSourceText += ' TreeName = "'+self.getTree().name+'"\n' dataSourceText += ' PulseNumber = ' + \ str(self.getTree().shot)+'\n' if outputTrigger == None: dataSourceText += ' StoreOnTrigger = 0\n' else: dataSourceText += ' StoreOnTrigger = 1\n' dataSourceText += ' EventName = "'+gamName+'UpdatejScope"\n' dataSourceText += ' TimeRefresh = 1\n' dataSourceText += ' Signals = {\n' currTimebase = self.timebase.evaluate() if isinstance(currTimebase, Range): period = currTimebase.delta.data() else: currTimebase = currTimebase.data() period = currTimebase[1] - currTimebase[0] try: syncDiv = self.timebase_div.data() period = period * syncDiv except: pass # If trigger is defined put it as first signal if outputTrigger != None: dataSourceText += ' Trigger = {\n' dataSourceText += ' Type = uint8\n' dataSourceText += ' }\n' # Time Management dataSourceText += ' Time = {\n' dataSourceText += ' NodeName = "' + \ configDict['outTimeNid'].getFullPath()+'"\n' # keep into account possibl sample information for that GAM currSamples = 1 try: currSamples = outputDict['samples'] except: currSamples = 1 
dataSourceText += ' Period = '+str(period/currSamples)+'\n' dataSourceText += ' MakeSegmentAfterNWrites = 100\n' dataSourceText += ' AutomaticSegmentation = 0\n' if outputTrigger != None: dataSourceText += ' TimeSignal = 1\n' if startTime != 0: dataSourceText += ' SamplePhase = ' + \ str(int(round(startTime/period)))+'\n' dataSourceText += ' }\n' for outputDict in outputDicts: if outputDict['seg_len'] > 0: dataSourceText += ' '+outputDict['name']+' = {\n' dataSourceText += ' NodeName = "' + \ outputDict['value_nid'].getFullPath()+'"\n' dataSourceText += ' Period = ' + \ str(period/currSamples)+'\n' dataSourceText += ' MakeSegmentAfterNWrites = ' + \ str(outputDict['seg_len'])+'\n' dataSourceText += ' AutomaticSegmentation = 0\n' if startTime != 0: dataSourceText += ' SamplePhase = ' + \ str(int(round(startTime/period)))+'\n' dataSourceText += ' }\n' # Check if the output is a struct and seglen is > 0 for one o more fields for fieldDict in outputDict['fields']: if fieldDict['seg_len'] > 0: dataSourceText += ' ' + \ outputDict['name']+'_'+fieldDict['name']+' = {\n' dataSourceText += ' NodeName = "' + \ fieldDict['value_nid'].getFullPath()+'"\n' dataSourceText += ' Period = ' + \ str(period/currSamples)+'\n' dataSourceText += ' MakeSegmentAfterNWrites = ' + \ str(fieldDict['seg_len'])+'\n' dataSourceText += ' AutomaticSegmentation = 0\n' if startTime != 0: dataSourceText += ' SamplePhase = ' + \ str(int(round(startTime/period)))+'\n' dataSourceText += ' }\n' # end for fieldDict in outputDict['fields']: dataSourceText += ' }\n' dataSourceText += ' }\n' dataSources.append(dataSourceText) gamList.append(gamName+'_TreeOutIOGAM') gamText = ' +'+gamName+'_TreeOutIOGAM = {\n' if outputTrigger != None: # If using output trigger, the trigger must be converted to uint8 gamText += ' Class = ConversionGAM\n' else: gamText += ' Class = IOGAM\n' gamText += ' InputSignals = {\n' # MdsWriter Trigger management if outputTrigger != None: physicalTrigger = True try: triggerNode = 
outputTrigger.getParent().getParent().getParent() # If the trigger is pysically generated, i.e. it is derived from another device (GAM or Input) if triggerNode.getUsage() == 'DEVICE': triggerGamName = self.convertPath( triggerNode.getFullPath()) triggerSigName = outputTrigger.getParent().getNode(':name').data() gamText += ' '+triggerSigName+' = {\n' if self.onSameThread(threadMap, triggerNode): gamText += ' DataSource = '+triggerGamName+'_Output_DDB\n' elif self.sameSynchSource(sourceNode): gamText += ' DataSource = '+triggerGamName+'_Output_Synch\n' try: syncDiv = self.timebase_div.data() gamText += ' Samples = ' + \ str(syncDiv)+'\n' except: pass # Consider RealTimeSynchronization downsampling only if timebase_div is defined else: gamText += ' DataSource = '+triggerGamName+'_Output_Asynch\n' gamText += ' }\n' else: physicalTrigger = False except: physicalTrigger = False if(not physicalTrigger): # Trigger source is derived from a stored input waveform nonGamInputNodes.append({'expr': outputTrigger.decompile( ), 'dimensions': 0, 'name': 'Trigger', 'col_order': False}) gamText += ' '+'Trigger'+' = {\n' gamText += ' DataSource = '+gamName+'_TreeInput\n' gamText += ' Type = uint8\n' gamText += ' }\n' # end Trigger Management # Time signal management gamText += ' Time = {\n' gamText += ' DataSource = ' + timerDDB+'\n' gamText += ' Type = uint32\n' gamText += ' }\n' # Other output signals # first non struct outputs for outputDict in outputDicts: if outputDict['seg_len'] > 0 and len(outputDict['fields']) == 0: gamText += ' '+outputDict['name'] + ' = {\n' gamText += ' DataSource = '+gamName+'_Output_DDB\n' gamText += ' }\n' # then struct outputs for which at least one field has seg_len > 0 for outputDict in outputDicts: fieldsToStore = False for fieldDict in outputDict['fields']: if fieldDict['seg_len'] > 0: fieldsToStore = True if fieldsToStore: gamText += ' '+outputDict['name'] + ' = {\n' gamText += ' DataSource = '+gamName+'_Output_DDB\n' gamText += ' Type = ' + 
outputDict['type']+'\n' gamText += ' }\n' # end for outputDict in outputDicts: gamText += ' }\n' gamText += ' OutputSignals = {\n' if outputTrigger != None: gamText += ' Trigger = {\n' gamText += ' DataSource = '+gamName+'_TreeOutput\n' gamText += ' type = uint8\n' gamText += ' }\n' # Time signal gamText += ' Time = {\n' gamText += ' DataSource = '+gamName+'_TreeOutput\n' gamText += ' Type = uint32\n' gamText += ' }\n' # Other signals for outputDict in outputDicts: # first non struct outputs if outputDict['seg_len'] > 0 and len(outputDict['fields']) == 0: gamText += ' '+outputDict['name'] + ' = {\n' gamText += ' DataSource = '+gamName+'_TreeOutput\n' gamText += ' Type = '+outputDict['type']+'\n' # If the GAM device defines Samples in its output, take precedence over dimensions information hasSamples = False try: currSamples = outputDict['samples'] if currSamples > 1: hasSamples =
<filename>dcos/package.py import abc import base64 import collections import copy import hashlib import json import os import re import shutil import stat import subprocess import zipfile from distutils.version import LooseVersion import git import portalocker import pystache import six from dcos import (constants, emitting, errors, http, marathon, mesos, subcommand, util) from dcos.errors import DCOSException, DefaultError from six.moves import urllib logger = util.get_logger(__name__) emitter = emitting.FlatEmitter() PACKAGE_METADATA_KEY = 'DCOS_PACKAGE_METADATA' PACKAGE_NAME_KEY = 'DCOS_PACKAGE_NAME' PACKAGE_VERSION_KEY = 'DCOS_PACKAGE_VERSION' PACKAGE_SOURCE_KEY = 'DCOS_PACKAGE_SOURCE' PACKAGE_FRAMEWORK_KEY = 'DCOS_PACKAGE_IS_FRAMEWORK' PACKAGE_RELEASE_KEY = 'DCOS_PACKAGE_RELEASE' PACKAGE_COMMAND_KEY = 'DCOS_PACKAGE_COMMAND' PACKAGE_REGISTRY_VERSION_KEY = 'DCOS_PACKAGE_REGISTRY_VERSION' PACKAGE_FRAMEWORK_NAME_KEY = 'DCOS_PACKAGE_FRAMEWORK_NAME' def install_app(pkg, revision, init_client, options, app_id): """Installs a package's application :param pkg: the package to install :type pkg: Package :param revision: the package revision to install :type revision: str :param init_client: the program to use to run the package :type init_client: object :param options: package parameters :type options: dict :param app_id: app ID for installation of this package :type app_id: str :rtype: None """ # Insert option parameters into the init template init_desc = pkg.marathon_json(revision, options) if app_id is not None: logger.debug('Setting app ID to "%s" (was "%s")', app_id, init_desc['id']) init_desc['id'] = app_id # Send the descriptor to init init_client.add_app(init_desc) def _make_package_labels(pkg, revision, options): """Returns Marathon app labels for a package. 
:param pkg: The package to install :type pkg: Package :param revision: The package revision to install :type revision: str :param options: package parameters :type options: dict :returns: Marathon app labels :rtype: dict """ metadata = pkg.package_json(revision) encoded_metadata = _base64_encode(metadata) is_framework = metadata.get('framework') if not is_framework: is_framework = False package_registry_version = pkg.registry.get_version() package_labels = { PACKAGE_METADATA_KEY: encoded_metadata, PACKAGE_NAME_KEY: metadata['name'], PACKAGE_VERSION_KEY: metadata['version'], PACKAGE_SOURCE_KEY: pkg.registry.source.url, PACKAGE_FRAMEWORK_KEY: json.dumps(is_framework), PACKAGE_REGISTRY_VERSION_KEY: package_registry_version, PACKAGE_RELEASE_KEY: revision } if pkg.has_command_definition(revision): command = pkg.command_json(revision, options) package_labels[PACKAGE_COMMAND_KEY] = _base64_encode(command) # Run a heuristic that determines the hint for the framework name framework_name = _find_framework_name(pkg.name(), options) if framework_name: package_labels[PACKAGE_FRAMEWORK_NAME_KEY] = framework_name return package_labels def _find_framework_name(package_name, options): """ :param package_name: the name of the package :type package_name: str :param options: the options object :type options: dict :returns: the name of framework if found; None otherwise :rtype: str """ return options.get(package_name, {}).get('framework-name', None) def _base64_encode(dictionary): """Returns base64(json(dictionary)). :param dictionary: dict to encode :type dictionary: dict :returns: base64 encoding :rtype: str """ json_str = json.dumps(dictionary, sort_keys=True) str_bytes = six.b(json_str) return base64.b64encode(str_bytes).decode('utf-8') def uninstall(package_name, remove_all, app_id, cli, app): """Uninstalls a package. 
:param package_name: The package to uninstall :type package_name: str :param remove_all: Whether to remove all instances of the named app :type remove_all: boolean :param app_id: App ID of the app instance to uninstall :type app_id: str :param init_client: The program to use to run the app :type init_client: object :rtype: None """ if cli is False and app is False: cli = app = True uninstalled = False if cli: if subcommand.uninstall(package_name): uninstalled = True if app: num_apps = uninstall_app( package_name, remove_all, app_id, marathon.create_client(), mesos.DCOSClient()) if num_apps > 0: uninstalled = True if uninstalled: return None else: msg = 'Package [{}]'.format(package_name) if app_id is not None: msg += " with id [{}]".format(app_id) msg += " is not installed." raise DCOSException(msg) def uninstall_subcommand(distribution_name): """Uninstalls a subcommand. :param distribution_name: the name of the package :type distribution_name: str :returns: True if the subcommand was uninstalled :rtype: bool """ return subcommand.uninstall(distribution_name) def uninstall_app(app_name, remove_all, app_id, init_client, dcos_client): """Uninstalls an app. 
:param app_name: The app to uninstall :type app_name: str :param remove_all: Whether to remove all instances of the named app :type remove_all: boolean :param app_id: App ID of the app instance to uninstall :type app_id: str :param init_client: The program to use to run the app :type init_client: object :param dcos_client: the DCOS client :type dcos_client: dcos.mesos.DCOSClient :returns: number of apps uninstalled :rtype: int """ apps = init_client.get_apps() def is_match(app): encoding = 'utf-8' # We normalize encoding for byte-wise comparison name_label = app.get('labels', {}).get(PACKAGE_NAME_KEY, u'') name_label_enc = name_label.encode(encoding) app_name_enc = app_name.encode(encoding) name_matches = name_label_enc == app_name_enc if app_id is not None: pkg_app_id = app.get('id', '') normalized_app_id = init_client.normalize_app_id(app_id) return name_matches and pkg_app_id == normalized_app_id else: return name_matches matching_apps = [a for a in apps if is_match(a)] if not remove_all and len(matching_apps) > 1: app_ids = [a.get('id') for a in matching_apps] raise DCOSException( ("Multiple apps named [{}] are installed: [{}].\n" + "Please use --app-id to specify the ID of the app to uninstall," + " or use --all to uninstall all apps.").format( app_name, ', '.join(app_ids))) for app in matching_apps: package_json = _decode_and_add_context( app['id'], app.get('labels', {})) # First, remove the app from Marathon init_client.remove_app(app['id'], force=True) # Second, shutdown the framework with Mesos framework_name = app.get('labels', {}).get(PACKAGE_FRAMEWORK_NAME_KEY) if framework_name is not None: logger.info( 'Trying to shutdown framework {}'.format(framework_name)) frameworks = mesos.Master(dcos_client.get_master_state()) \ .frameworks(inactive=True) # Look up all the framework names framework_ids = [ framework['id'] for framework in frameworks if framework['name'] == framework_name ] logger.info( 'Found the following frameworks: {}'.format(framework_ids)) 
# Emit post uninstall notes emitter.publish( DefaultError( 'Uninstalled package [{}] version [{}]'.format( package_json['name'], package_json['version']))) if 'postUninstallNotes' in package_json: emitter.publish( DefaultError(package_json['postUninstallNotes'])) if len(framework_ids) == 1: dcos_client.shutdown_framework(framework_ids[0]) elif len(framework_ids) > 1: raise DCOSException( "Unable to shutdown the framework for [{}] because there " "are multiple frameworks with the same name: [{}]. " "Manually shut them down using 'dcos service " "shutdown'.".format( framework_name, ', '.join(framework_ids))) return len(matching_apps) class InstalledPackage(object): """Represents an intalled DCOS package. One of `app` and `subcommand` must be supplied. :param apps: A dictionary representing a marathon app. Of the format returned by `installed_apps()` :type apps: [dict] :param subcommand: Installed subcommand :type subcommand: subcommand.InstalledSubcommand """ def __init__(self, apps=[], subcommand=None): assert apps or subcommand self.apps = apps self.subcommand = subcommand def name(self): """ :returns: The name of the package :rtype: str """ if self.subcommand: return self.subcommand.name else: return self.apps[0]['name'] def dict(self): """ A dictionary representation of the package. Used by `dcos package list`. :returns: A dictionary representation of the package. :rtype: dict """ ret = {} if self.subcommand: ret['command'] = {'name': self.subcommand.name} if self.apps: ret['apps'] = [app['appId'] for app in self.apps] if self.subcommand: package_json = self.subcommand.package_json() ret.update(package_json) ret['packageSource'] = self.subcommand.package_source() ret['releaseVersion'] = self.subcommand.package_revision() else: ret.update(self.apps[0]) ret.pop('appId') return ret def installed_packages(init_client, endpoints): """Returns all installed packages in the format: [{ 'apps': [<id>], 'command': { 'name': <name> } ...<metadata>... 
}] :param init_client: The program to use to list packages :type init_client: object :param endpoints: Whether to include a list of endpoints as port-host pairs :type endpoints: boolean :returns: A list of installed packages :rtype: [InstalledPackage] """ apps = installed_apps(init_client, endpoints) subcommands = installed_subcommands() dicts = collections.defaultdict(lambda: {'apps': [], 'command': None}) for app in apps: key = (app['name'], app['releaseVersion'], app['packageSource']) dicts[key]['apps'].append(app) for subcmd in subcommands: package_revision = subcmd.package_revision() package_source = subcmd.package_source() key = (subcmd.name, package_revision, package_source) dicts[key]['command'] = subcmd return [ InstalledPackage(pkg['apps'], pkg['command']) for pkg in dicts.values() ] def installed_subcommands(): """Returns all installed subcommands. :returns: all installed subcommands :rtype: [InstalledSubcommand] """ return [subcommand.InstalledSubcommand(name) for name in subcommand.distributions()] def installed_apps(init_client, endpoints=False): """ Returns all installed apps. An app is of the format: { 'appId': <appId>, 'packageSource': <source>, 'registryVersion': <app_version>, 'releaseVersion': <release_version> 'endpoints' (optional): [{ 'host': <host>, 'ports': <ports>, }] ..<package.json properties>.. 
} :param init_client: The program to use to list packages :type init_client: object :param endpoints: Whether to include a list of endpoints as port-host pairs :type endpoints: boolean :returns: all installed apps :rtype: [dict] """ apps = init_client.get_apps() encoded_apps = [(a['id'], a['labels']) for a in apps if a.get('labels', {}).get(PACKAGE_METADATA_KEY)] # Filter elements that failed to parse correctly as JSON valid_apps = [] for app_id, labels in encoded_apps: try: decoded = _decode_and_add_context(app_id, labels) except Exception: logger.exception( 'Unable to decode package metadata during install: %s', app_id) valid_apps.append(decoded) if endpoints: for app in valid_apps: tasks = init_client.get_tasks(app["appId"]) app['endpoints'] = [{"host": t["host"], "ports": t["ports"]} for t in tasks] return valid_apps def _decode_and_add_context(app_id, labels): """ Create an enhanced package JSON from Marathon labels { 'appId': <appId>, 'packageSource': <source>, 'registryVersion': <app_version>, 'releaseVersion': <release_version>, ..<package.json properties>.. } :param app_id: Marathon application id :type app_id: str :param labels: Marathon label dictionary :type labels: dict :rtype: dict """ encoded = labels.get(PACKAGE_METADATA_KEY, {}) decoded = base64.b64decode(six.b(encoded)).decode() decoded_json = util.load_jsons(decoded) decoded_json['appId'] = app_id decoded_json['packageSource'] = labels.get(PACKAGE_SOURCE_KEY) decoded_json['releaseVersion'] = labels.get(PACKAGE_RELEASE_KEY) return decoded_json def search(query, cfg): """Returns a list of index entry collections, one for each registry in the supplied config. :param query: The
from Discord into an integration update event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.guild_events.IntegrationUpdateEvent The parsed integration update event object. """ @abc.abstractmethod def deserialize_presence_update_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject, *, old_presence: typing.Optional[presences_models.MemberPresence], ) -> guild_events.PresenceUpdateEvent: """Parse a raw payload from Discord into a presence update event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. old_presence: typing.Optional[hikari.presences.MemberPresence] The presence object or `builtins.None`. Returns ------- hikari.events.guild_events.PresenceUpdateEvent The parsed presence update event object. """ ###################### # INTERACTION EVENTS # ###################### @abc.abstractmethod def deserialize_interaction_create_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject, ) -> interaction_events.InteractionCreateEvent: """Parse a raw payload from Discord into a interaction create event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.interaction_events.InteractionCreateEvent The parsed interaction create event object. """ ################# # MEMBER EVENTS # ################# @abc.abstractmethod def deserialize_guild_member_add_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject ) -> member_events.MemberCreateEvent: """Parse a raw payload from Discord into a guild member add event object. 
Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.member_events.MemberCreateEvent The parsed guild member add event object. """ @abc.abstractmethod def deserialize_guild_member_update_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject, *, old_member: typing.Optional[guild_models.Member], ) -> member_events.MemberUpdateEvent: """Parse a raw payload from Discord into a guild member update event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. old_member: typing.Optional[hikari.guilds.Member] The member object or `builtins.None`. Returns ------- hikari.events.member_events.MemberUpdateEvent The parsed guild member update event object. """ @abc.abstractmethod def deserialize_guild_member_remove_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject, *, old_member: typing.Optional[guild_models.Member], ) -> member_events.MemberDeleteEvent: """Parse a raw payload from Discord into a guild member remove event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. old_member: typing.Optional[hikari.guilds.Member] The member object or `builtins.None`. Returns ------- hikari.events.member_events.MemberDeleteEvent The parsed guild member remove event object. """ ############### # ROLE EVENTS # ############### @abc.abstractmethod def deserialize_guild_role_create_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject ) -> role_events.RoleCreateEvent: """Parse a raw payload from Discord into a guild role create event object. 
Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.role_events.RoleCreateEvent The parsed guild role create event object. """ @abc.abstractmethod def deserialize_guild_role_update_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject, *, old_role: typing.Optional[guild_models.Role], ) -> role_events.RoleUpdateEvent: """Parse a raw payload from Discord into a guild role update event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. old_role: typing.Optional[hikari.guilds.Role] The role object or `builtins.None`. Returns ------- hikari.events.role_events.RoleUpdateEvent The parsed guild role update event object. """ @abc.abstractmethod def deserialize_guild_role_delete_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject, *, old_role: typing.Optional[guild_models.Role], ) -> role_events.RoleDeleteEvent: """Parse a raw payload from Discord into a guild role delete event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. old_role: typing.Optional[hikari.guilds.Role] The role object or `builtins.None`. Returns ------- hikari.events.role_events.RoleDeleteEvent The parsed guild role delete event object. """ ################### # LIFETIME EVENTS # ################### @abc.abstractmethod def deserialize_starting_event(self) -> lifetime_events.StartingEvent: """Build a starting event object. Returns ------- hikari.events.lifetime_events.StartingEvent The built starting event object. """ @abc.abstractmethod def deserialize_started_event(self) -> lifetime_events.StartedEvent: """Build a started event object. 
        Returns
        -------
        hikari.events.lifetime_events.StartedEvent
            The built started event object.
        """

    @abc.abstractmethod
    def deserialize_stopping_event(self) -> lifetime_events.StoppingEvent:
        """Build a stopping event object.

        Returns
        -------
        hikari.events.lifetime_events.StoppingEvent
            The built stopping event object.
        """

    @abc.abstractmethod
    def deserialize_stopped_event(self) -> lifetime_events.StoppedEvent:
        """Build a stopped event object.

        Returns
        -------
        hikari.events.lifetime_events.StoppedEvent
            The built stopped event object.
        """

    ##################
    # MESSAGE EVENTS #
    ##################

    @abc.abstractmethod
    def deserialize_message_create_event(
        self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject
    ) -> message_events.MessageCreateEvent:
        """Parse a raw payload from Discord into a message create event object.

        Parameters
        ----------
        shard : hikari.api.shard.GatewayShard
            The shard that emitted this event.
        payload : hikari.internal.data_binding.JSONObject
            The dict payload to parse.

        Returns
        -------
        hikari.events.message_events.MessageCreateEvent
            The parsed message create event object.
        """

    @abc.abstractmethod
    def deserialize_message_update_event(
        self,
        shard: gateway_shard.GatewayShard,
        payload: data_binding.JSONObject,
        *,
        old_message: typing.Optional[messages_models.PartialMessage],
    ) -> message_events.MessageUpdateEvent:
        """Parse a raw payload from Discord into a message update event object.

        Parameters
        ----------
        shard : hikari.api.shard.GatewayShard
            The shard that emitted this event.
        payload : hikari.internal.data_binding.JSONObject
            The dict payload to parse.
        old_message: typing.Optional[hikari.messages.PartialMessage]
            The message object or `builtins.None`.

        Returns
        -------
        hikari.events.message_events.MessageUpdateEvent
            The parsed message update event object.
""" @abc.abstractmethod def deserialize_message_delete_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject ) -> message_events.MessageDeleteEvent: """Parse a raw payload from Discord into a message delete event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.message_events.MessageDeleteEvent The parsed message delete event object. """ @abc.abstractmethod def deserialize_message_delete_bulk_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject ) -> message_events.MessageDeleteEvent: """Parse a raw payload from Discord into a message delete bulk event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.message_events.MessageDeleteEvent The parsed message delete bulk event object. Raises ------ builtins.NotImplementedError If a bulk delete occurs in a DM channel. """ ################### # REACTION EVENTS # ################### @abc.abstractmethod def deserialize_message_reaction_add_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject ) -> reaction_events.ReactionAddEvent: """Parse a raw payload from Discord into a message reaction add event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.reaction_events.ReactionAddEvent The parsed message reaction add event object. 
""" @abc.abstractmethod def deserialize_message_reaction_remove_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject ) -> reaction_events.ReactionDeleteEvent: """Parse a raw payload from Discord into a message reaction remove event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.reaction_events.ReactionDeleteEvent The parsed message reaction remove event object. """ @abc.abstractmethod def deserialize_message_reaction_remove_all_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject ) -> reaction_events.ReactionDeleteAllEvent: """Parse a raw payload from Discord into a message reaction remove all event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.reaction_events.ReactionDeleteAllEvent The parsed message reaction remove all event object. """ @abc.abstractmethod def deserialize_message_reaction_remove_emoji_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject ) -> reaction_events.ReactionDeleteEmojiEvent: """Parse a raw payload from Discord into a message reaction remove emoji event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.reaction_events.ReactionDeleteEmojiEvent The parsed message reaction remove emoji event object. 
""" ################ # SHARD EVENTS # ################ @abc.abstractmethod def deserialize_shard_payload_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject, *, name: str ) -> shard_events.ShardPayloadEvent: """Parse a raw payload from Discord into a shard payload event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. name : builtins.str Name of the event. Returns ------- hikari.events.shard_events.ShardPayloadEvent The parsed shard payload event object. """ @abc.abstractmethod def deserialize_ready_event( self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject, ) -> shard_events.ShardReadyEvent: """Parse a raw payload from Discord into a ready event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. payload : hikari.internal.data_binding.JSONObject The dict payload to parse. Returns ------- hikari.events.shard_events.ShardReadyEvent The parsed ready event object. """ @abc.abstractmethod def deserialize_connected_event(self, shard: gateway_shard.GatewayShard) -> shard_events.ShardConnectedEvent: """Build a shard connected event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. Returns ------- hikari.events.shard_events.ShardReadyEvent The built shard connected event object. """ @abc.abstractmethod def deserialize_disconnected_event(self, shard: gateway_shard.GatewayShard) -> shard_events.ShardDisconnectedEvent: """Build a shard disconnected event object. Parameters ---------- shard : hikari.api.shard.GatewayShard The shard that emitted this event. Returns ------- hikari.events.shard_events.ShardReadyEvent The built shard disconnected event object. 
""" @abc.abstractmethod def deserialize_resumed_event(self, shard: gateway_shard.GatewayShard) -> shard_events.ShardResumedEvent: """Build a shard resumed event object. Parameters ----------
= {} #-- def pRobotHold(self): """ Hold position of physical robot. Return Value: None. """ if self.IsCommUp(): self.mCmd.CmdStop() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # vHemisson Gui and Support # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #-- def GuiInit(self): """ Initialize vHemisson GUI. """ # Menubar list self.mMenuBarList = Gluon.GluonMenuBarList() self.mMenuBarList.add('Robot|Robot Options...', 'command', callback=self.GuiCbRobotOptions) self.mMenuBarList.add('Robot|Module Options|Linear Camera...', 'command', callback=self.GuiCbRobotOptModLinCam) self.mMenuBarList.add('Robot|Module Options|Text-To-Speech...', 'command', callback=self.GuiCbRobotOptModTts) self.mMenuBarList.add('Robot|Module Options|UltraSonic Sensor...', 'command', callback=self.GuiCbRobotOptModUss) self.mMenuBarList.add('Robot', 'separator') self.mMenuBarList.add('Robot|Connect...', 'command', callback=self.GuiCbRobotConnect, disabledStates={self.mServerId:[ Gluon.EServerState.Ready, Gluon.EServerState.Running, Gluon.EServerState.Paused, Gluon.EServerState.Stepping ]}) self.mMenuBarList.add('Robot|Disconnect', 'command', callback=self.GuiCbRobotDisconnect, disabledStates={self.mServerId:[ Gluon.EServerState.NotLoaded, Gluon.EServerState.NotReady ]}) self.mMenuBarList.add('Robot', 'separator') self.mMenuBarList.add('Robot|Module Scan...', 'command', callback=self.GuiCbRobotModScan) self.mMenuBarList.add('Robot|Calibration|Proximity...', 'command', callback=self.GuiCbRobotCalProximity) self.mMenuBarList.add('Robot|Calibration|Ambient...', 'command', callback=self.GuiCbRobotCalAmbient) self.mMenuBarList.add('Robot', 'separator') self.mMenuBarList.add('Robot|Robot Visualizer', 'command', callback=self.GuiCbRobotViz) self.mMenuBarList.add('Robot|Module Visualizer|Linear Camera', 'command', callback=self.GuiCbRobotVizLinCam) self.mMenuBarList.add('Robot|Module Visualizer|Text-To-Speech', 'command', 
callback=self.GuiCbRobotVizTts) self.mMenuBarList.add('Robot|Module Visualizer|UltraSonic Sensor', 'command', callback=self.GuiCbRobotVizUss) self.mMenuBarList.add('Robot|Shell', 'command', callback=self.GuiCbRobotShell) self.mMenuBarList.add('Help|About Robot...', 'command', callback=self.GuiCbHelpAbout) # Plugin Toolbar list self.mToolBarList = Gluon.GluonToolBarList() self.mToolBarList.add('connect', self.GuiCbRobotConnect, tooltip='Establish a serial connection with the Hemisson', imagefile=gut.GetFusionImageFileName('SerConn.gif'), altStates={self.mServerId:[ Gluon.EServerState.Ready, Gluon.EServerState.Running, Gluon.EServerState.Paused, Gluon.EServerState.Stepping ]}, alttooltip='Disconnect serial connection to the Hemisson', altimagefile=gut.GetFusionImageFileName('SerDisc.gif')) #-- def GuiDeinit(self): """ Deinitialize GUI objects. """ pass #-- def GuiWinModComCfg(self, runtimeKey, modName): """ Common configuration settings for Module Windows. Parameters: runtimeKey - mOpt run-time key specific to the module. modName - mModScan key specific to module Return Value: {'run_time':<val>, 'module':<val>} """ comcfg = {} if runtimeKey == 'UseTtsEffector': # always enabled comcfg['run_time'] = 'enabled' elif self.mOpt[runtimeKey]: comcfg['run_time'] = 'enabled' else: comcfg['run_time'] = 'disabled' if modName in self.mModScan: comcfg['module'] = 'detected' else: comcfg['module'] = 'not detected' return comcfg #-- def GuiCbRobotOptions(self): """ 'Options' menu callback. 
""" if __debug__: self.mDbg.d1print('Robot|Options') lastSettings = {} # Get parsed ini configuation (ini guraunteed to exist) ini = self.GSGetIni() section = HemiIniDD.IniDDSectOpts settingNames = GuiDlgHemiOpt.GetSettingNames() iniSettings = ini.IniGetSubItems(section, settingNames) lastSettings = utils.tuples2dict(iniSettings) # get parent gui object for this dialog parent = self.GSGuiGetParent() # the dialog dlg = GuiDlgHemiOpt.GuiDlgHemiOpt(parent, lastSettings=lastSettings) # options have been okay'ed if dlg.result: opts = dlg.result # update ini with current connection settings iniSettings = utils.dict2tuples(opts) ini.IniSetModifiedItems(section, iniSettings) # Re-init settings self.IniInitOpt() # go back to parent gui self.GSGuiRaiseParent() #-- def GuiCbRobotOptModLinCam(self): """ 'LinCam Module Options' menu callback """ if __debug__: self.mDbg.d1print('Robot|Module Options|Linear Camera') # Get parsed ini configuation (ini guraunteed to exist) ini = self.GSGetIni() section = HemiIniDD.IniDDSectLinCam iniSettings = ini.IniGetItems(section) lastSettings = utils.tuples2dict(iniSettings) # get parent gui object for this dialog parent = self.GSGuiGetParent() # the dialog dlg = GuiDlgHemiModLinCam.GuiDlgHemiModLinCam(parent, lastSettings=lastSettings) # options have been okay'ed if dlg.result: opts = dlg.result # update ini with current connection settings iniSettings = utils.dict2tuples(opts) ini.IniSetModifiedItems(section, iniSettings) # Re-init settings self.IniInitLinCam() # go back to parent gui self.GSGuiRaiseParent() #-- def GuiCbRobotOptModTts(self): """ 'TTS Module Options' menu callback """ if __debug__: self.mDbg.d1print('Robot|Module Options|Text-To-Speech') # Get parsed ini configuation (ini guraunteed to exist) ini = self.GSGetIni() section = HemiIniDD.IniDDSectTts iniSettings = ini.IniGetItems(section) lastSettings = utils.tuples2dict(iniSettings) # get parent gui object for this dialog parent = self.GSGuiGetParent() # the dialog dlg = 
GuiDlgHemiModTts.GuiDlgHemiModTts(parent, lastSettings=lastSettings) # options have been okay'ed if dlg.result: opts = dlg.result # update ini with current connection settings iniSettings = utils.dict2tuples(opts) ini.IniSetModifiedItems(section, iniSettings) # Re-init settings self.IniInitTts() # go back to parent gui self.GSGuiRaiseParent() #-- def GuiCbRobotOptModUss(self): """ 'USS Module Options' menu callback """ if __debug__: self.mDbg.d1print('Robot|Module Options|UltraSonic Sensor') # Get parsed ini configuation (ini guraunteed to exist) ini = self.GSGetIni() section = HemiIniDD.IniDDSectUss iniSettings = ini.IniGetItems(section) lastSettings = utils.tuples2dict(iniSettings) # get parent gui object for this dialog parent = self.GSGuiGetParent() # the dialog dlg = GuiDlgHemiModUss.GuiDlgHemiModUss(parent, lastSettings=lastSettings) # options have been okay'ed if dlg.result: opts = dlg.result # update ini with current connection settings iniSettings = utils.dict2tuples(opts) ini.IniSetModifiedItems(section, iniSettings) # Re-init settings self.IniInitUss() # go back to parent gui self.GSGuiRaiseParent() #-- def GuiCbRobotConnect(self): """ 'Robot|Connect' menu callback. """ if __debug__: self.mDbg.d1print('Robot|Connect') # RDK!!! 
hack until ToolBar support SM if self.IsCommUp(): self.GuiCbRobotDisconnect() return portHistory = [] lastSettings = {} # Get parsed ini configuation (ini guraunteed to exist) ini = self.GSGetIni() section = HemiIniDD.IniDDSectConn optList = ini.IniGetReItems(section, 'port[0-9]+') for opt,val in optList: portHistory += [val] portHistory.sort() settingNames = GuiDlgSerConn.GetSettingNames() iniSettings = ini.IniGetSubItems(section, settingNames) lastSettings = utils.tuples2dict(iniSettings) # get Hemisson serial port supported values listBaudRates = self.mCmd.GetSupportedBaudRates() listByteSizes = self.mCmd.GetSupportedByteSizes() listParities = self.mCmd.GetSupportedParities() listStopBits = self.mCmd.GetSupportedStopBits() # get parent gui object for this dialog parent = self.GSGuiGetParent() # the dialog dlg = GuiDlgSerConn.GuiDlgSerConn(parent, self.mCmd.Open, portHistory=portHistory, lastSettings=lastSettings, validBaudRates=listBaudRates, validByteSizes=listByteSizes, validParities=listParities, validStopBits=listStopBits) # Serial connection has been successfully opened if dlg.result: curOpened = dlg.result # update ini with current connection settings iniSettings = utils.dict2tuples(curOpened) ini.IniSetModifiedItems(section, iniSettings) # update port history isNewPort = True for val in portHistory: if val == curOpened['port']: isNewPort = False break if isNewPort: ini.IniRemoveReOptions(section, 'port[0-9]+') i = 1 for val in portHistory: ini.IniSet(section, "port%d" % i, val) i += 1 ini.IniSet(section, "port%d" % i, curOpened['port']) # Re-init settings self.IniInitConn() # Set new communication status self.SetCommStatus(True) # flush any residuals self.mCmd.FlushInput() # Scan for modules, setting data a necessary self.pRobotModScan() # go back to parent gui self.GSGuiRaiseParent() #-- def GuiCbRobotDisconnect(self): """ 'Disonnect' menu callback. 
""" if __debug__: self.mDbg.d1print('Robot|Disconnect') self.SetCommStatus(False) self.mCmd.Close() #-- def GuiCbRobotModScan(self): """ 'Module Scan' menu callback """ if __debug__: self.mDbg.d1print('Robot|Modules|Scan') self.pRobotModScan() #[('Text-To-Speech', 'T', 196, 6), ('UltraSonic Sensor', 'U', 224, 8)] hdr = 'Module Id I2C Version\n' + \ '------ -- ---- -------\n' txt = '' for modinfo in self.mModScan.values(): txt += modinfo['modname'] sp = 26 - len(modinfo['modname']) txt += '%*s' %(sp, modinfo['mid']) sp = 4 txt += '%*s0x%02x' %(sp, '', modinfo['i2c_addr']) sp = 5 txt += '%*sv%d\n' %(sp, '', modinfo['ver']) if not txt: txt = 'Module scan is unavailable.\n' # get parent gui object for this dialog parent = self.GSGuiGetParent() GuiDlgAbout.GuiDlgAbout(parent, name=self.HasName() + ' Attached I' + \ gt.UniSuperscript['2'] + 'C Modules', desc=hdr+txt) # go back to parent gui self.GSGuiRaiseParent() #-- def GuiCbRobotCalProximity(self): """ 'Proximity Sensor Calibration' menu callback """ if __debug__: self.mDbg.d1print('Robot|Calibration|Proximity') sensorMimeType = hvals.HemiSensorMimeTypeProximity sensorName = 'Proximity' # get parsed ini configuation (ini guraunteed to exist) ini = self.GSGetIni() section = HemiIniDD.IniDDSectProximity iniSettings = ini.IniGetItems(section) lastSettings = utils.tuples2dict(iniSettings) factSettings = { 'KBrightnessMin': HemiBase.HemiIrProxMinKBrightness, 'KBrightnessMax': HemiBase.HemiIrProxMaxKBrightness, 'KBrightnessDft': HemiBase.HemiIrProxDftKBrightness, 'NoiseFloorMin': HemiBase.HemiIrProxMinNoiseFloor, 'NoiseFloorMax': HemiBase.HemiIrProxMaxNoiseFloor, 'NoiseFloorDft': HemiBase.HemiIrProxDftNoiseFloor, 'MaxDist': HemiBase.HemiIrProxMaxDist * 2 # for graphing } # get parent gui object for this dialog parent = self.GSGuiGetParent() # the dialog dlg = GuiDlgHemiCalIrLed.GuiDlgHemiCalIrLed(parent, sensorMimeType, sensorName, self.mCmd.ProximitySensorGetDftCalibration, factSettings, lastSettings) # options have been 
okay'ed if dlg.result: # update ini with current connection settings iniSettings = utils.dict2tuples(dlg.result) ini.IniSetModifiedItems(section, iniSettings) # re-init settings self.IniInitProximity() # go back to parent gui self.GSGuiRaiseParent() #-- def GuiCbRobotCalAmbient(self): """ 'Ambient Sensor Calibration' menu callback """ if __debug__: self.mDbg.d1print('Robot|Calibration|Ambient') sensorMimeType = hvals.HemiSensorMimeTypeAmbient sensorName = 'Ambient' # get parsed ini configuation (ini guraunteed to exist) ini = self.GSGetIni() section = HemiIniDD.IniDDSectAmbient iniSettings = ini.IniGetItems(section) lastSettings = utils.tuples2dict(iniSettings) factSettings = { 'KBrightnessMin': HemiBase.HemiIrAmbMinKBrightness, 'KBrightnessMax': HemiBase.HemiIrAmbMaxKBrightness, 'KBrightnessDft': HemiBase.HemiIrAmbDftKBrightness, 'NoiseFloorMin': HemiBase.HemiIrAmbMinNoiseFloor, 'NoiseFloorMax': HemiBase.HemiIrAmbMaxNoiseFloor, 'NoiseFloorDft': HemiBase.HemiIrAmbDftNoiseFloor, 'MaxDist': HemiBase.HemiIrAmbMaxDist # for graphing } # get parent gui object for this dialog parent = self.GSGuiGetParent() # the dialog dlg = GuiDlgHemiCalIrLed.GuiDlgHemiCalIrLed(parent, sensorMimeType, sensorName, self.mCmd.AmbientSensorGetDftCalibration, factSettings, lastSettings) # options have been okay'ed if dlg.result: # update ini with current connection settings iniSettings = utils.dict2tuples(dlg.result) ini.IniSetModifiedItems(section, iniSettings) # re-init settings self.IniInitAmbient() # go back to parent gui self.GSGuiRaiseParent() #-- def GuiCbRobotViz(self): """ 'Visualize Robot' menu callback. """ if __debug__: self.mDbg.d1print('Robot|Robot Visualizer') msgbox.WarningBox('Not implemented yet.') #-- def GuiCbRobotVizLinCam(self): """ 'Visualize Linear Camera Module' menu callback. 
""" if __debug__: self.mDbg.d1print('Robot|Module Vizualizer|Linear Camers') win = self.GSGuiWinStart('VizLinCam', # window ID GuiWinHemiVizLinCam.GuiWinHemiVizLinCam, # start object sense_lincam=self.SenseLinCam) # arguments to
import copy

from rlcard.games.doudizhu.utils import CARD_TYPE
#from douzero.dmc.utils import act

# Mapping between the integer "env" card encoding and the one-character
# "real" card encoding ('3'..'A', '2', 'B'=black joker, 'R'=red joker).
EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7',
                    8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q',
                    13: 'K', 14: 'A', 17: '2', 20: 'B', 30: 'R'}

RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
                    '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12,
                    'K': 13, 'A': 14, '2': 17, 'B': 20, 'R': 30}

# Position of each real-card character in a 15-slot count list.
INDEX = {'3': 0, '4': 1, '5': 2, '6': 3, '7': 4,
         '8': 5, '9': 6, 'T': 7, 'J': 8, 'Q': 9,
         'K': 10, 'A': 11, '2': 12, 'B': 13, 'R': 14}


def get_best_actions(hand_cards, last_two_moves):
    """Return prioritized (action, remaining_hand) candidates for this turn.

    :param hand_cards: current hand as a sorted list of env-encoded ints
    :param last_two_moves: (last_move, two_moves_ago) env-encoded lists;
        an empty last_move means the previous player passed
    :returns: list of (action_env_arr, remaining_hand_env_arr) tuples
    """
    last_move, two_moves_ago = last_two_moves
    lm = last_move
    # If the previous player passed, we must beat the move before that.
    if len(last_move) == 0:
        lm = two_moves_ago
    combinations = get_combinations(env_arr2real_card_str(hand_cards))
    moves = []
    if len(lm) == 0:
        # Both opponents passed (or game start): we lead.
        moves = get_best_leading_moves(hand_cards, combinations)
    else:
        moves = get_best_following_moves(hand_cards, combinations, lm)
    return moves


# Get the best combinations from your hand.
# TODO generate all chains, trios recursively and the solos / pairs that arise from that because
# TODO sometimes these may result in better plays to play a "worse" combos
def get_combinations(hand):
    '''Get optimal combinations of cards in hand

    Greedily partitions ``hand`` (a sorted real-card string) into rocket,
    bombs, trios/trio chains, pair chains, solo chains, pairs and solos,
    then attaches the lowest leftover solos/pairs to trios as kickers.
    '''
    comb = {'rocket': [], 'bomb': [], 'trio': [], 'trio_kickers': [],
            'trio_chain': [], 'solo_chain': [], 'pair_chain': [], 'pair': [], 'solo': []}
    # 1. pick rocket (both jokers sort to the end of the hand string)
    if hand[-2:] == 'BR':
        comb['rocket'].append('BR')
        hand = hand[:-2]
    # 2. pick bomb (four of a kind; hand is sorted so equal cards are adjacent)
    hand_cp = hand
    for index in range(len(hand_cp) - 3):
        if hand_cp[index] == hand_cp[index + 3]:
            bomb = hand_cp[index: index + 4]
            comb['bomb'].append(bomb)
            hand = hand.replace(bomb, '')
    # 3. pick trio and trio_chain (consecutive-rank trios are concatenated)
    hand_cp = hand
    for index in range(len(hand_cp) - 2):
        if hand_cp[index] == hand_cp[index + 2]:
            trio = hand_cp[index: index + 3]
            # Extend the previous trio into a chain when ranks are adjacent
            # and below '2' (INDEX 12) — '2' cannot participate in chains.
            if len(comb['trio']) > 0 and INDEX[trio[-1]] < 12 and (INDEX[trio[-1]] - 1) == INDEX[comb['trio'][-1][-1]]:
                comb['trio'][-1] += trio
            else:
                comb['trio'].append(trio)
            hand = hand.replace(trio, '')
    # Split plain trios from trio chains (chains are longer than 3 chars).
    only_trio = []
    only_trio_chain = []
    for trio in comb['trio']:
        if len(trio) == 3:
            only_trio.append(trio)
        else:
            only_trio_chain.append(trio)
    comb['trio'] = only_trio
    comb['trio_chain'] = only_trio_chain
    hand_list = card_str2count_list(hand)
    # The following two combos are the change from V1.
    # 4. Pick Non Disruptive Pair Chains
    chains, hand_list = pick_non_disruptive_pair_chains(hand_list)
    comb['pair_chain'] = chains
    # 5. Pick solo chains
    chains, hand_list = pick_chain_v2(hand_list, 1)
    comb['solo_chain'] = chains
    # update hand again with new hand list
    hand = count_list2card_str(hand_list)
    # 6. pick pair and solo
    index = 0
    while index < len(hand) - 1:
        if hand[index] == hand[index + 1]:
            comb['pair'].append(hand[index] + hand[index + 1])
            index += 2
        else:
            comb['solo'].append(hand[index])
            index += 1
    if index == (len(hand) - 1):
        comb['solo'].append(hand[index])
    # 7. Add lowest solo and pairs to trios
    solosAndPairs = comb['solo'] + comb['pair']
    # sort by rank (descending, so .pop() yields the lowest-ranked kicker).
    # NOTE(review): assumes CARD_TYPE[0][s] is [(type, rank), ...] with rank
    # at position 1 — per rlcard's doudizhu utils; confirm on upgrade.
    solosAndPairs.sort(key=lambda acs: int(
        CARD_TYPE[0][acs][0][1]), reverse=True)
    # reverse trio so we can pop trios we've added kickers to off the back
    comb['trio'].reverse()
    popFromTrioCount = 0
    # add the lowest kickers to each each trio_kicker
    for i in range(len(comb['trio'])):
        if len(solosAndPairs) > 0:
            el = solosAndPairs.pop()
            # sort the trio so that the lower rank cards come first
            new_acs = comb['trio'][i] + el
            popFromTrioCount += 1
            new_ac = real_card_str2env_arr(new_acs)
            new_ac.sort()
            comb['trio_kickers'].append(env_arr2real_card_str(new_ac))
    for i in range(popFromTrioCount):
        comb['trio'].pop()
    # put the lists back in their normal order
    comb['trio'].reverse()
    return comb


def getFirstAndLastArr(arr):
    """Return [first, last] of ``arr`` (or ``arr`` itself when len < 2)."""
    if len(arr) < 2:
        return arr
    # Stride of len-1 picks exactly the first and last elements.
    return arr[::len(arr) - 1]


def convertActionListArr(arr):
    """Convert (action_str, hand_str) tuples to env-encoded int arrays."""
    return list(map(lambda tuple: (real_card_str2env_arr(tuple[0]), real_card_str2env_arr(tuple[1])), arr))


def getNextHandTupleArr(hand, action_strs):
    """Pair each candidate action with the hand remaining after playing it.

    :param hand: current hand as env-encoded ints
    :param action_strs: candidate actions as real-card strings
    :returns: list of (action_str, remaining_hand_str) tuples
    """
    def filterHand(action_str):
        hand_str = env_arr2real_card_str(hand)
        for c in action_str:
            # BUGFIX: remove exactly ONE copy of each played card.  The
            # previous str.replace(c, '') stripped *every* copy of the
            # rank, so e.g. playing a single '3' from a hand holding '33'
            # emptied both from the remaining-hand string.
            hand_str = hand_str.replace(c, '', 1)
        return (action_str, hand_str)
    return list(map(filterHand, action_strs))


def formatResultTuple(hand, action_strs):
    """Build env-encoded (action, remaining_hand) tuples for ``action_strs``."""
    return convertActionListArr(getNextHandTupleArr(hand, action_strs))


# Get a prioritzed array of all the best moves from your combinations
def get_best_leading_moves(hand, combinations):
    """Rank leading plays: chains first, then trio(+kicker)/pair/solo
    extremes, with bombs and rocket last (kept in reserve)."""
    action_strs = combinations['trio_chain'] + combinations['pair_chain'] + combinations['solo_chain'] \
        + getFirstAndLastArr(combinations['trio_kickers']) + getFirstAndLastArr(combinations['trio']) \
        + getFirstAndLastArr(combinations['pair']) + getFirstAndLastArr(combinations['solo']) + \
        combinations['bomb'] + combinations['rocket']
    result = formatResultTuple(hand, action_strs)
    return result


# Move-type tags as produced by CARD_TYPE lookups.
TRIO_CHAIN = 'trio_chain'
PAIR_CHAIN = 'pair_chain'
SOLO_CHAIN = 'solo_chain'
TRIO = 'trio'
TRIO_SOLO = 'trio_solo'
TRIO_PAIR = 'trio_pair'
PAIR = 'pair'
SOLO = 'solo'


# the idea behind this is we don't want to break up our best hands, we would rather pass
# Additionally, playing your highest ranked hands may be advantageous over lowest ranked to control
# the board sometimes.
def get_best_following_moves(hand, combinations, last_move):
    """Return prioritized plays that beat ``last_move``, or a pass.

    :param hand: current hand as env-encoded ints
    :param combinations: output of get_combinations() for this hand
    :param last_move: move to beat, as env-encoded ints
    :returns: list of (action_env_arr, remaining_hand_env_arr) tuples;
        always includes the pass tuple ([], hand) as a fallback
    """
    last_move_str = env_arr2real_card_str(last_move)
    # NOTE(review): assumes CARD_TYPE[0][move] is [(type, rank), ...] with
    # rank at index 1 (rlcard doudizhu utils) — confirm on upgrade.
    the_type, last_rank = CARD_TYPE[0][last_move_str][0]
    last_rank = int(last_rank)
    moves = []
    if PAIR_CHAIN in the_type:
        # A longer chain can answer a shorter one by playing its top slice.
        for move in combinations['pair_chain']:
            if len(move) >= len(last_move):
                new_move = move[len(move) - len(last_move):]
                this_rank = int(CARD_TYPE[0][new_move][0][1])
                if this_rank > last_rank:
                    moves.append(new_move)
    elif SOLO_CHAIN in the_type:
        for move in combinations['solo_chain']:
            if len(move) >= len(last_move):
                new_move = move[len(move) - len(last_move):]
                this_rank = int(CARD_TYPE[0][new_move][0][1])
                if this_rank > last_rank:
                    moves.append(new_move)
    elif TRIO == the_type:
        for move in combinations['trio']:
            this_rank = int(CARD_TYPE[0][move][0][1])
            if this_rank > last_rank:
                moves.append(move)
    elif TRIO_SOLO == the_type:
        # Answer trio+solo with a higher trio plus our lowest spare solo.
        for move in combinations['trio']:
            this_rank = int(CARD_TYPE[0][move][0][1])
            if this_rank > last_rank and len(combinations['solo']) > 0:
                trio = move + combinations['solo'][0]
                trio_arr = real_card_str2env_arr(trio)
                trio_arr.sort()
                moves.append(env_arr2real_card_str(trio_arr))
    elif TRIO_PAIR == the_type:
        for move in combinations['trio']:
            this_rank = int(CARD_TYPE[0][move][0][1])
            if this_rank > last_rank and len(combinations['pair']) > 0:
                trio = move + combinations['pair'][0]
                trio_arr = real_card_str2env_arr(trio)
                trio_arr.sort()
                moves.append(env_arr2real_card_str(trio_arr))
    elif PAIR == the_type:
        for move in combinations['pair']:
            this_rank = int(CARD_TYPE[0][move][0][1])
            if this_rank > last_rank:
                moves.append(move)
    elif SOLO == the_type:
        for move in combinations['solo']:
            this_rank = int(CARD_TYPE[0][move][0][1])
            if this_rank > last_rank:
                moves.append(move)
    # Bombs: against a bomb only a bigger bomb counts; against anything
    # but a rocket, every bomb is legal.
    legal_bombs = []
    if 'bomb' == the_type:
        for move in combinations['bomb']:
            this_rank = int(CARD_TYPE[0][move][0][1])
            if this_rank > last_rank:
                legal_bombs.append(move)
    elif 'rocket' != the_type:
        legal_bombs = combinations['bomb']
    # prior_moves = copy.copy(moves)
    # Offer only the weakest and strongest regular answers, then bombs/rocket.
    moves = getFirstAndLastArr(moves) + legal_bombs + combinations['rocket']
    # if env_arr2real_card_str([3, 4, 6, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14]) == env_arr2real_card_str(hand):
    #     print('hand', hand)
    #     print('lm', last_move)
    #     print('type', the_type)
    #     print('combos', combinations)
    #     print('prior_moves', prior_moves)
    #     print('moves', moves)
    if len(moves) > 0:
        # convert to actions arrays and add pass as a valid move
        return formatResultTuple(hand, moves) + [([], hand)]
    # No combination answered; fall back to breaking up the hand for the
    # two simplest move types before conceding a pass.
    if the_type == SOLO:
        legal_solos = []
        for i, move in enumerate(env_arr2real_card_str(hand)):
            this_rank = int(CARD_TYPE[0][move][0][1])
            if this_rank > last_rank and move not in legal_solos:
                legal_solos.append(move)
        result = formatResultTuple(hand, legal_solos) + [([], hand)]
        return result
    if the_type == PAIR:
        legal_pairs = []
        for i, card in enumerate(hand):
            # check if pair
            if i < len(hand) - 1 and card == hand[i + 1]:
                # create move
                move = env_arr2real_card_str([card, card])
                this_rank = int(CARD_TYPE[0][move][0][1])
                if this_rank > last_rank and move not in legal_pairs:
                    legal_pairs.append(move)
        return formatResultTuple(hand, legal_pairs) + [([], hand)]
    # pass this turn since there aren't any legal actions
    return [([], hand)]


# Pick pair chains which results in removing more cards from hand than by playing the solo straight
# surrounding it.
def pick_non_disruptive_pair_chains(hand_list):
    """Prefer pair chains only when they shed more cards than the solo
    chain covering the same span; otherwise keep the hand untouched.

    hand_list: 15-slot per-rank count list (see card_str2count_list).
    Returns (chains, remaining_hand_list).
    """
    # NOTE(review): solo_chains is computed but unused — only the leftover
    # counts are compared; confirm this is intentional.
    solo_chains, solo_handlist = pick_chain_v2(hand_list, 1)
    pair_chains, pair_handlist = pick_chain_v2(hand_list, 2)
    # Fewer leftover cards after pulling pair chains => pair chains win.
    if sum(pair_handlist) < sum(solo_handlist):
        return (pair_chains, pair_handlist)
    # Return original handlist if there are no good pair chains to pick
    return ([], hand_list)


def card_str2count_list(hand):
    """Convert a real-card string into a 15-slot per-rank count list,
    indexed through the module-level INDEX mapping."""
    hand_list = [0 for _ in range(15)]
    for card in hand:
        hand_list[INDEX[card]] += 1
    return hand_list


def count_list2card_str(hand_list):
    """Inverse of card_str2count_list: expand per-rank counts back into a
    real-card string (each rank character repeated `count` times)."""
    card_str = ''
    # print('v1', hand_list)
    cards = [card for card in INDEX]
    for index, count in enumerate(hand_list):
        card_str += cards[index] * count
    return card_str


def env_arr2real_card_str(ac):
    """Map an env-encoded card array to its real-card string via
    EnvCard2RealCard. Operates on a copy; input is not mutated."""
    _ac = ac.copy()
    for i, c in enumerate(_ac):
        _ac[i] = EnvCard2RealCard[c]
    return ''.join(_ac)


def real_card_str2env_arr(ac):
    """Map a real-card string to the env-encoded card array via
    RealCard2EnvCard."""
    _ac = []
    for c in ac:
        _ac.append(RealCard2EnvCard[c])
    return _ac


def pick_chain(hand_list, count):
    """Extract straight chains (length >= 5) from a per-rank count list.

    count: 1 for solo chains, 2 for pair chains (min card count is
    integer-divided by it). Only ranks 0..11 are chainable (hand_list[:12]).
    Returns (chains, remaining_hand_list).
    """
    chains = []
    str_card = [card for card in INDEX]
    # Work on a stringified copy so the hand can be split on '0' gaps.
    hand_list = [str(card) for card in hand_list]
    hand = ''.join(hand_list[:12])
    # Contiguous nonzero runs are chain candidates; '0' breaks a run.
    chain_list = hand.split('0')
    add = 0
    for index, chain in enumerate(chain_list):
        if len(chain) > 0:
            if len(chain) >= 5:
                # Absolute start index of this run within the hand.
                start = index + add
                # How many full chains of this width fit (per `count`).
                min_count = int(min(chain)) // count
                if min_count != 0:
                    str_chain = ''
                    for num in range(len(chain)):
                        str_chain += str_card[start + num]
                        hand_list[start + num] = int(hand_list[start + num]) - int(min(chain))
                    for _ in range(min_count):
                        chains.append(str_chain)
            # NOTE(review): indentation reconstructed from collapsed source;
            # upstream versions keep this inside `if len(chain) > 0` — confirm.
            add += len(chain)
    hand_list = [int(card) for card in hand_list]
    return (chains, hand_list)


def pick_chain_v2(hand_list, count):
    # (truncated in this view — continues past the visible chunk)
    chains = []
    str_card = [card for card in INDEX]
    hand_list = [str(card) for card in hand_list]
    hand = ''.join(hand_list[:12])
    formatted_hand = hand
    # based on count, turn all cards that break chain to 0 so
t2, ''' <a href="#" onclick="show_hideStuff('detailed_data'); return false;"> <br><br><hr><br> <h3>Detailed Data (click to see or hide)</h3></a><br> <div id="detailed_data" style="display:none"> ''' # last xx tweets is response limited to 180 res_last200_tweets = get_last200_tweets(user_to_check.lower()) #print '<p>', type(res_last200_tweets), len(res_last200_tweets), '</p>' final_tweet_list = [] final_data_for_plots = [] do_rest_of_display_data = 0 try: user_reference = res_last200_tweets[0] tweet_last200_tweets = res_last200_tweets[1] final_tweet_list.append(tweet_last200_tweets) final_data_for_plots.append(res_last200_tweets[2]) do_rest_of_display_data = 1 except: print '<p>Something wrong to get the list of twitter IDs</p>' if (do_rest_of_display_data == 1): print >> t2, '<br>' try: if len(user_reference) > 0: for newuser in user_reference: if newuser != user_to_check: res_last200_tweets = get_last200_tweets(newuser.lower()) tweets_from_res_last200 = res_last200_tweets[1] final_tweet_list.append(tweets_from_res_last200) final_data_for_plots.append(res_last200_tweets[2]) else: print >>t2, '<p>', 'Did not find any instance of other users referenced in your tweets.' ,'</p>' except: print >>t2, '<p>', 'No info found.' 
,'</p>' #Add the data to the temp file also print >> t2, '<br><br><hr><h4>List of Tweets Analyzed</h4>' print >> t2, '<table id="table1" class="pure-table" width=100% style="display: block;">' print >> t2, '<thead><tr bgcolor=#def><td>Date</td><td>Sender</td><td>Text</td></tr></thead>' row_even = True for i1 in final_tweet_list: for i2 in i1: #database fields: current date, username, screen name, twt_date, twt_writer, twt_text twts = [datetime.date.today(),scn_name,user_to_check,i2[0],text_sanitize(i2[1]),text_sanitize(i2[2])] try: if row_even == True: print >> t2, '<tr><td><sm>', twts[3] ,'</sm></td><td><sm>', str(twts[4]),'</sm></td><td><sm>', str(twts[5]),'</sm></td></tr>' row_even = False else: print >> t2, '<tr class="pure-table-odd"><td><sm>', twts[3] ,'</sm></td><td><sm>', str(twts[4]),'</sm></td><td><sm>', str(twts[5]),'</sm></td></tr>' row_even = True except: print '', print >> t2, '</table>' #print out the chart data #data fields: screen_name, friends, followers, msgs, daterange, tweets, retweets #print json.dumps(final_data_for_plots,indent=2) #try doing a chart #draw a chart showing friends and followers print '<h3>Friends and Followers</h3>' x_fdfp = [] y1_fdfp = [] y2_fdfp = [] #print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>' x_fdfp.append( 'Screen Name' ) y1_fdfp.append( 'Friends' ) y2_fdfp.append( 'Followers' ) for xy1 in range(len(final_data_for_plots)): x_fdfp.append( final_data_for_plots[xy1][0] ) y1_fdfp.append( final_data_for_plots[xy1][1] ) y2_fdfp.append( final_data_for_plots[xy1][2] ) two_bar_chart_data("Friends and Followers", x_fdfp, y1_fdfp, y2_fdfp) print '<h3>Followers to Friends Ratio</h3>' #Draw a bar chart to show followers to friends ratio x_fdfp = [] y_fdfp = [] #print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>' for xy1 in range(len(final_data_for_plots)): x_fdfp.append( final_data_for_plots[xy1][0] ) y_fdfp.append( round( 1.0 * final_data_for_plots[xy1][2] / max(final_data_for_plots[xy1][1],1),1) ) #print '<p>',x_fdfp, 
y_fdfp, '</p>' bar_chart_data("Followers to Friends Ratio", x_fdfp, y_fdfp) print '<h3>Tweets sent per day</h3>' x_fdfp = [] y1_fdfp = [] y2_fdfp = [] #print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>' x_fdfp.append( 'Screen Name' ) y1_fdfp.append( 'Tweets per day - with retweets' ) y2_fdfp.append( 'Tweets per day - without retweets' ) for xy1 in range(len(final_data_for_plots)): x_fdfp.append( final_data_for_plots[xy1][0] ) y1_fdfp.append( final_data_for_plots[xy1][5] / max(final_data_for_plots[xy1][4],1) ) y2_fdfp.append( (final_data_for_plots[xy1][5]-final_data_for_plots[xy1][6]) / max(final_data_for_plots[xy1][4],1) ) two_bar_chart_data("Tweets sent per day", x_fdfp, y1_fdfp, y2_fdfp) print '<h3>Tweet range (tweets seen per day)</h3>' x_fdfp = [] y_fdfp = [] #print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>' for xy1 in range(len(final_data_for_plots)): x_fdfp.append( final_data_for_plots[xy1][0] ) y_fdfp.append( round( 1.0 * final_data_for_plots[xy1][2] * final_data_for_plots[xy1][5] / max(final_data_for_plots[xy1][4],1) ) ) #print '<p>',x_fdfp, y_fdfp, '</p>' bar_chart_data("Tweet Range", x_fdfp, y_fdfp) lex_anal(final_tweet_list) #print out the detailed data # go to the first record of the temp file first print >> t2, ' </div> ' t2.seek(0) print t2.read() t2.close() #if this works - can delete below this. else: print '<p>Not able to process this user. 
Please try another.</p>' print '</div>' #close the body_sty div html_end() def lex_anal(incomingTweetList): ''' routine to do lexical analysis ''' #final_tweet_list --- date / sender full name / tweet #read the tweets and create a list of sender-htag and sender-@ #incoming TweetList has two layer lists sender_htag = [] sender_at = [] h_tags_all = [] at_items_all = [] ts_all = [] for lex2 in incomingTweetList: for lex22 in lex2: td = lex22[0] #this is the tweet date try: ts = text_sanitize(lex22[1]) #this is the tweet sender except: print 'something wrong with ',lex22[1] ts = '---' ts_all.append(ts) h_tags = re.findall('[#]\w+',lex22[2]) #these are the h-tags at_items = re.findall('[@]\w+',lex22[2]) #these are the other users h_tags = [hti.lower() for hti in h_tags] at_items = [ati.lower() for ati in at_items] for h2 in h_tags: sender_htag.append([td,ts.lower()+'-'+h2]) h_tags_all.append(h2) for at2 in at_items: sender_at.append([td,ts.lower()+'-'+at2]) at_items_all.append(at2) #summarize the two new lists #following lists don't have dates sender_htag2 = [xx[1] for xx in sender_htag] sender_at2 = [yy[1] for yy in sender_at] #make a list of the tweet senders only ts_all = list(set(ts_all)) #print ts_all #get the top 10 htags #py2.6 ht_col = collections.Counter(h_tags_all) htag_data4heatmap = [] at_data4heatmap = [] #print '<ul>Top 10 Hashtags' #py2.6 for h_item in ht_col.most_common(10): for h_item in top_list(h_tags_all,10): #print '<li>', h_item, '</li>' #count the number of times each of the hastag was referenced by each tweet sender try: for tsitem in ts_all: try: itemtocount = str(tsitem+'-'+h_item[1]) htag_data4heatmap.append([tsitem,h_item[1], sender_htag2.count(itemtocount)]) except: print 'Problem here: ',h_item,tsitem except: print 'Problem here',h_item print '</ul>' #get the top 10 user references #py2.6 at_col = collections.Counter(at_items_all) #print '<ul>Top 10 Users' #py2.6 for a_item in at_col.most_common(10): for a_item in top_list(at_items_all,10): 
#print '<li>', a_item, '</li>' #count the number of times each of the hastag was referenced by each tweet sender try: for tsitem in ts_all: itemtocount = str(tsitem+'-'+a_item[1]) at_data4heatmap.append([tsitem,a_item[1], sender_at2.count(itemtocount)]) except: print 'Problem here 2',a_item print '</ul>' #draw the table with the heatmap tcols = len(ts_all) #number of tweet senders - rows trows = len(htag_data4heatmap) / tcols #number of hastags - cols #print trows, tcols if trows>0: print '<br><br>' print '<h3>Most Popular Hashtags</h3>' heatmap_table(trows,tcols,htag_data4heatmap) tcols = len(ts_all) #number of tweet senders - rows trows = len(at_data4heatmap) / tcols #number of hastags - cols #print trows, tcols if trows>0: print '<br><br>' print '<h3>Most Referenced Users</h3>' heatmap_table(trows,tcols,at_data4heatmap) def heatmap_table(trows,tcols,hm): #calculate the max and min of the references #and create a normalized color scale mx = max(i[2] for i in hm) mn = min(i[2] for i in hm) itv = mx - mn #COLOR pallete from http://colorbrewer2.org/ for arow in hm: rval = 1.0*arow[2]/itv if rval<0.1: arow[2]='#FFF5F0' elif rval>=0.1 and rval<0.25: arow[2]='#FEE0D2' elif rval>=0.25 and rval<0.4: arow[2]='#FCBBA1' elif rval>=0.4 and rval<0.5: arow[2]='#FC9272' elif rval>=0.5 and rval<0.6: arow[2]='#FB6A4A' elif rval>=0.6 and rval<0.7: arow[2]='#EF3B2C' elif rval>=0.7 and rval<0.8: arow[2]='#CB181D' elif rval>=0.8 and rval<0.9: arow[2]='#A50F15' elif rval>=0.9: arow[2]='#67000D' print '<table width=100% style="display: block;"> ' for i in range(trows+1): print '<tr>', for j in range(tcols+1): if (i==0 and j==0): print '<td width="15%">','','</td>', elif i==0 and j>0 and j<(tcols): print '<td width="8.5%"><sm>',hm[j-1][0][:10],'</sm></td>', elif i==0 and j==(tcols): print '<td width="8.5%"><sm>',hm[j-1][0][:10],'</sm></td></tr>' elif i>0 and j==0: print '<td><sm>',hm[(i-1)*tcols+j+1-1][1],'</sm></td>', elif i>0 and j>0 and j<tcols: print '<td 
bgcolor=',hm[(i-1)*tcols+j-1][2],'></td>', elif i>0 and j==tcols: print '<td bgcolor=',hm[(i-1)*tcols+j-1][2],'></td></tr>' print '</table> ' def print_detailed_tweets(in_usertocheck): html_start() check_another_user_button() #print '<h3>Listing of tweets analyzed:</h3>' sd2st = start_database_to_store_tweets() if sd2st[1] == True: c2 = sd2st[0] conn2 = sd2st[2] #read all the tweets for the username and screen name read_text = "SELECT * FROM tweetlist WHERE (username =\'"+in_usertocheck+"\')" #print '<p>Select tweet command:',read_text,'</p>' try: c2.execute(read_text) for crow in c2: print crow[1] conn2.close() #print '<h2>Finished with the tweet list</h2>' except conn2.Error, e: print "E Error %d: %s" % (e.args[0], e.args[1]) else: print "F Error %d: %s" % (sd2st[0].args[0],sd2st[0].args[1]) html_end() def bar_chart_data(cht_title,xdata,ydata): #this routine will draw a bar chart #print '<p>DO NOT PRINT anaything inside chart modules except needed items</p>' print '<!--Load the AJAX API-->' print '<script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>' print '<script type=\"text/javascript\">' # Load the Visualization API and the piechart package. print ' google.load(\'visualization\', \'1.0\', {\'packages\':[\'corechart\']}); ' # Set a callback to run when the Google Visualization API is loaded. print ' google.setOnLoadCallback(drawChart);' # Callback that creates and populates a data table, # instantiates the pie chart, passes in the data and # draws it. print ' function drawChart() { ' # Create the data table. print ' var data = new google.visualization.arrayToDataTable([ ' print ' [ \'Screen Name\', \' ' , cht_title, ' \', {role:\'style\'} ], ' for cdi in range(len(xdata)): if cdi == 0: print " [ \'", xdata[cdi], "\',", ydata[cdi], ", \'orange\' ], " else: print " [ \'", xdata[cdi], "\',", ydata[cdi], ", \'blue\' ], " print ' ]); ' #Set chart options print "
# Interface for loading preprocessed fMRI data and confounds table from os.path import exists from bids import BIDSLayout from nipype.interfaces.io import IOBase from nipype.utils.filemanip import copyfile from nipype.interfaces.base import (BaseInterfaceInputSpec, SimpleInterface, traits, TraitedSpec, Directory, Str, ImageFile, OutputMultiPath) from traits.trait_base import Undefined from traits.trait_types import Dict, List, Either, File, Int from fmridenoise.pipelines import load_pipeline_from_json, is_IcaAROMA import json import os from itertools import product import typing as t from fmridenoise.utils.entities import build_path, parse_file_entities_with_pipelines import logging logger = logging.getLogger(__name__) def _lists_to_entities(subjects: list, tasks: list, sessions: t.List[str], runs: t.List[str]): """ Convert lists of subjects, tasks and sessions into list of dictionaries (entities). It handles empty session list. """ keys = ['subject', 'task'] prod_elements = [subjects, tasks] if sessions: keys.append('session') prod_elements.append(sessions) if runs: keys.append('run') prod_elements.append(runs) return [{key: value for key, value in zip(keys, entity)} for entity in product(*prod_elements)] def _fill_empty_lists(layout: BIDSLayout, subjects: list, tasks: list, sessions: list, runs: t.List[str]): """ If filters are not provided by the user, load them from layout. """ subjects = subjects if subjects else layout.get_subjects() tasks = tasks if tasks else layout.get_tasks() sessions = sessions if sessions else layout.get_sessions() runs = runs if runs else layout.get_runs() return subjects, tasks, sessions, runs class MissingFile(IOError): pass class BIDSGrabInputSpec(BaseInterfaceInputSpec): # TODO: Check this inteface, why are there 'either file or list'? 
    #  ~Mateusz
    fmri_prep_files = List()
    fmri_prep_aroma_files = Either(List(ImageFile()), File())
    conf_raw_files = Either(List(File(exists=True)), File(exists=True))
    conf_json_files = Either(List(File(exists=True)), File(exists=True))
    subject = Str()
    task = Str()
    session = Str()
    run = Int()


class BIDSGrabOutputSpec(TraitedSpec):
    # Single files selected from the input lists for one entity.
    fmri_prep = ImageFile()
    fmri_prep_aroma = ImageFile()
    conf_raw = File(exists=True)
    conf_json = File(exists=True)


class BIDSGrab(SimpleInterface):
    """
    For each list of paths selects one file for given set of parameters -
    subject, session, task.
    """
    input_spec = BIDSGrabInputSpec
    output_spec = BIDSGrabOutputSpec

    def _run_interface(self, runtime):
        # fmri_prep(_aroma)_files are optional; only select when provided.
        if self.inputs.fmri_prep_files != Undefined:
            self._results['fmri_prep'] = self._select_one(self.inputs.fmri_prep_files)
        if self.inputs.fmri_prep_aroma_files != Undefined:
            self._results['fmri_prep_aroma'] = self._select_one(self.inputs.fmri_prep_aroma_files)
        self._results['conf_raw'] = self._select_one(self.inputs.conf_raw_files)
        self._results['conf_json'] = self._select_one(self.inputs.conf_json_files)
        return runtime

    def _select_one(self, _list: t.List[str]) -> str:
        """
        Wrapper for select_one that uses class instance variable.

        Args:
            _list (List[str]): list of file paths

        Returns:
            str: resulting file path meeting criteria
        """
        return self.select_one(_list,
                               subject=self.inputs.subject,
                               session=self.inputs.session,
                               task=self.inputs.task,
                               run=self.inputs.run)

    @staticmethod
    def select_one(_list: t.List[str], subject: str, task: str, session: str, run: str) -> str:
        """
        For given list of file paths returns one path for given subject,
        session and task. If no paths meet criteria empty string is
        returned instead. If more than one path is found ValueError is
        raised.

        Args:
            _list (List[str]): list of file paths
            subject (str): subject identifier without 'sub-'
            session (str): session identifier without 'ses-'
            task (str): task identifier without 'task-'
            run (str): run identifier without 'run-'

        Returns:
            str: resulting file path meeting criteria
        """
        # Matching is by substring on the BIDS entity tags in the path;
        # session/run filters only apply when the value is truthy.
        filters = [lambda x: f"sub-{subject}" in x,
                   lambda x: f"task-{task}" in x]
        if session:
            filters.append(lambda x: f"ses-{session}" in x)
        if run:
            filters.append(lambda x: f"run-{run}" in x)
        result = _list
        for fil in filters:
            result = filter(fil, result)
        result = list(result)
        if not len(result) <= 1:
            raise ValueError(f"Unambiguous number of querried files, expected 1 or 0 but got {len(result)}")
        return result[0] if len(result) == 1 else ''


class BIDSValidateInputSpec(BaseInterfaceInputSpec):
    # Root directory only required argument
    bids_dir = Directory(
        exists=True,
        required=True,
        desc='BIDS dataset root directory'
    )
    # Default: 'fmriprep'
    derivatives = traits.List(desc='Specifies name of derivatives directory')
    # Separate queries from user
    tasks = traits.List(Str(), desc='Names of tasks to denoise')
    sessions = traits.List(Str(), desc='Labels of sessions to denoise')
    subjects = traits.List(Str(), desc='Labels of subjects to denoise')
    runs = traits.List(Int(), desc='Labels of runs to denoise')
    # Pipelines from user or default
    pipelines = traits.List(
        File(),
        desc='List of paths to selected pipelines'
    )


class BIDSValidateOutputSpec(TraitedSpec):
    # Goes to BIDSGrab (whole lists)
    fmri_prep = traits.List(File)
    fmri_prep_aroma = traits.List(File)
    conf_raw = traits.List(File)
    conf_json = traits.List(File)
    tasks = traits.List(Str)
    sessions = traits.List(Str)
    subjects = traits.List(Str)
    runs = traits.List(trait=Int())
    # Outputs pipelines loaded as dicts
    pipelines = traits.List(Dict)
    # Goes to Denoiser
    tr_dict = traits.Dict()


class BIDSValidate(SimpleInterface):
    """
    Interface responsible for calling BIDSLayout and validating file structure.
    It should output to:
        - layout (-> BIDSGrab)
        - task, session, subject (-> iterNodes)
        - pipeline (-> ?)
        - tr_dict (-> Denoiser)

    It should raise exception when:
        - user specified incorrect flags (there are no matching files)
        - some files are missing e.g. these for AROMA pipeline, when it is
          required
    """
    input_spec = BIDSValidateInputSpec
    output_spec = BIDSValidateOutputSpec

    @staticmethod
    def validate_derivatives(bids_dir: str, derivatives: t.Union[str, t.List[str]]) -> t.Tuple[t.List[str], t.List[str]]:
        """
        Validate derivatives argument provided by the user before calling
        layout. It creates required full path for derivatives directory.
        Also returns scope required for queries.

        Args:
            bids_dir (str): Path to bids root directory.
            derivatives (Union[str, List[str]]): str or list(str)
                Derivatives to use for denoising.

        Returns:
            derivatives_valid (list): Validated derivatives list.
            scope (list): Right scope keyword used in pybids query.

        Raises:
            MissingFile: when a derivative lacks dataset_description.json.
        """
        if isinstance(derivatives, str):
            derivatives_valid = [derivatives]
        else:
            derivatives_valid = derivatives
        # Create full paths to derivatives folders
        derivatives_valid = [os.path.join(bids_dir, 'derivatives', d)
                             for d in derivatives_valid]
        # Establish right scope keyword for arbitrary packages
        scope = []
        for derivative_path in derivatives_valid:
            dataset_desc_path = os.path.join(derivative_path,
                                             'dataset_description.json')
            if exists(dataset_desc_path):
                with open(dataset_desc_path, 'r') as f:
                    dataset_desc = json.load(f)
            else:
                raise MissingFile(f"{derivative_path} should contain"
                                  + " dataset_description.json file")
            try:
                major, minor, patch = (int(element) for element in str(dataset_desc['BIDSVersion']).split('.'))
            except Exception:
                raise Exception(f"Unable to parse bids version ({dataset_desc['BIDSVersion']}) into 3 parts")
            # BIDS <= 1.3 keeps the pipeline name in PipelineDescription;
            # later versions moved it to the GeneratedBy list.
            if major == 1 and minor <= 3:
                try:
                    scope.append(dataset_desc['PipelineDescription']['Name'])
                except KeyError as e:
                    raise KeyError("Key 'PipelineDescription.Name' is "
                                   f"required in {dataset_desc_path} file") from e
            else:
                pipeline = None
                try:
                    for pipeline in dataset_desc['GeneratedBy']:
                        scope.append(pipeline['Name'])
                except KeyError as e:
                    raise KeyError(f"Unable to extract Name from GeneratedBy: {pipeline} in file {dataset_desc_path}")
        return derivatives_valid, scope

    @staticmethod
    def get_entity_files(layout: BIDSLayout, include_no_aroma: bool, include_aroma: bool, entity: dict) -> tuple:
        """
        Checks if all required files are present for single entity defined
        by subject, session and task labels. If include_aroma is True also
        checks for AROMA file. Note that session argument can be undefined.

        Args:
            layout (BIDSLayout): BIDSLayout to query.
            include_no_aroma (bool): also require the plain preprocessed image.
            include_aroma (bool): also require the AROMA-denoised image.
            entity (dict): subject/task(/session/run) entity to query for.

        Returns:
            (missing: Union[bool, dict], dict): the filter that failed (or
            False when nothing is missing) and the files found so far.
        """
        filter_fmri = {
            'extension': ['nii', 'nii.gz'],
            'suffix': 'bold',
            'desc': 'preproc',
            'space': 'MNI152NLin2009cAsym'
        }
        filter_fmri_aroma = {
            'extension': ['nii', 'nii.gz'],
            'suffix': 'bold',
            'desc': 'smoothAROMAnonaggr',
            # 'space': 'MNI152NLin2009cAsym'
        }
        filter_conf = {
            'extension': 'tsv',
            'suffix': ['regressors', 'timeseries'],
            'desc': 'confounds',
        }
        filter_conf_json = {
            'extension': 'json',
            'suffix': ['regressors', 'timeseries'],
            'desc': 'confounds',
        }
        filters_names = ['conf_raw', 'conf_json']
        filters = [filter_conf, filter_conf_json]
        if include_no_aroma:
            filters.append(filter_fmri)
            filters_names.append('fmri_prep')
        if include_aroma:
            filters.append(filter_fmri_aroma)
            filters_names.append('fmri_prep_aroma')
        entity_files = {}
        # Exactly one file must match each filter; on any other count the
        # offending filter is returned as the "missing" marker.
        for filter, filter_name in zip(filters, filters_names):
            files = layout.get(**entity, **filter)
            if len(files) != 1:
                return filter, entity_files
            entity_files[filter_name] = files[0]
        return False, entity_files

    @staticmethod
    def validate_files(
            layout: BIDSLayout,
            tasks: t.List[str],
            sessions: t.List[str],
            subjects: t.List[str],
            runs: t.List[str],
            include_aroma: bool,
            include_no_aroma: bool):
        """
        Checks if for all parameters permutations every file
        (confounds.tsv, confounds.json, img, img with aroma) exists.
        Aroma and no aroma files are checked if proper flag is set to true.

        Args:
            layout (BIDSLayout): BIDSLayout
            tasks (List[str]): tasks that are expected to exist
            sessions (List[str]): sessions that are expected to exist
            subjects (List[str]): subjects that are expected to exist
            runs (List[str]): runs that are expected to exist
            include_aroma (bool): check for aroma files for every
                task/session/subject configuration
            include_no_aroma (bool): check for no aroma files for every
                task/session/subject configuration

        Returns:
            entity files and tuple with all tasks, subjects, sessions
        """
        subjects_to_exclude = []
        # Select interface behavior depending on user behavior:
        # when the user gave no explicit filters, missing files are only
        # logged and the affected subjects dropped; otherwise they raise.
        if not tasks and not sessions and not subjects:
            raise_missing = False
        else:
            raise_missing = True
        subjects, tasks, sessions, runs = _fill_empty_lists(layout, subjects, tasks, sessions, runs)
        entities = _lists_to_entities(subjects, tasks, sessions, runs)
        entities_files = []
        if raise_missing:
            # Raise error if there are missing files
            for entity in entities:
                missing, entity_files = BIDSValidate.get_entity_files(layout, include_no_aroma, include_aroma, entity)
                entities_files.append(entity_files)
                if missing:
                    miss = {**entity, **missing}
                    raise MissingFile(
                        f'missing file(s) for {miss} (check if you are using AROMA pipelines)')
        else:
            # Log missing files and exclude subjects for missing files
            for entity in entities:
                missing, entity_files = BIDSValidate.get_entity_files(layout, include_no_aroma, include_aroma, entity)
                entities_files.append(entity_files)
                if missing:
                    subjects_to_exclude.append(entity['subject'])
                    miss = {**entity, **missing}
                    # NOTE(review): this inner import shadows nothing and is
                    # redundant — `logging`/`logger` are module-level already.
                    import logging
                    logger.warning(f'missing file(s) for {miss}')
        subjects = [subject for subject in subjects
                    if subject not in subjects_to_exclude]
        return entities_files, (tasks, sessions, subjects, runs)

    def _run_interface(self,
    # name as appears in the specification.
    name = 'drem'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dreturn(Instruction):
    """The JVM ``dreturn`` instruction (opcode 0xaf)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xaf
    #: The JVM instruction name as appears in the specification.
    name = 'dreturn'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dstore(Instruction):
    """The JVM ``dstore`` instruction (opcode 0x39); takes one operand."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x39
    #: The JVM instruction name as appears in the specification.
    name = 'dstore'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>B', 'I'),)
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = True


class dstore_0(Instruction):
    """The JVM ``dstore_0`` instruction (opcode 0x47)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x47
    #: The JVM instruction name as appears in the specification.
    name = 'dstore_0'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dstore_1(Instruction):
    """The JVM ``dstore_1`` instruction (opcode 0x48)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x48
    #: The JVM instruction name as appears in the specification.
    name = 'dstore_1'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dstore_2(Instruction):
    """The JVM ``dstore_2`` instruction (opcode 0x49)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x49
    #: The JVM instruction name as appears in the specification.
    name = 'dstore_2'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dstore_3(Instruction):
    """The JVM ``dstore_3`` instruction (opcode 0x4a)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x4a
    #: The JVM instruction name as appears in the specification.
    name = 'dstore_3'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dsub(Instruction):
    """The JVM ``dsub`` instruction (opcode 0x67)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x67
    #: The JVM instruction name as appears in the specification.
    name = 'dsub'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dup(Instruction):
    """The JVM ``dup`` instruction (opcode 0x59)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x59
    #: The JVM instruction name as appears in the specification.
    name = 'dup'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dup_x1(Instruction):
    """The JVM ``dup_x1`` instruction (opcode 0x5a)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x5a
    #: The JVM instruction name as appears in the specification.
    name = 'dup_x1'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dup_x2(Instruction):
    """The JVM ``dup_x2`` instruction (opcode 0x5b)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x5b
    #: The JVM instruction name as appears in the specification.
    name = 'dup_x2'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dup2(Instruction):
    """The JVM ``dup2`` instruction (opcode 0x5c)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x5c
    #: The JVM instruction name as appears in the specification.
    name = 'dup2'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dup2_x1(Instruction):
    """The JVM ``dup2_x1`` instruction (opcode 0x5d)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x5d
    #: The JVM instruction name as appears in the specification.
    name = 'dup2_x1'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class dup2_x2(Instruction):
    """The JVM ``dup2_x2`` instruction (opcode 0x5e)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x5e
    #: The JVM instruction name as appears in the specification.
    name = 'dup2_x2'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class f2d(Instruction):
    """The JVM ``f2d`` instruction (opcode 0x8d)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x8d
    #: The JVM instruction name as appears in the specification.
    name = 'f2d'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class f2i(Instruction):
    """The JVM ``f2i`` instruction (opcode 0x8b)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x8b
    #: The JVM instruction name as appears in the specification.
    name = 'f2i'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class f2l(Instruction):
    """The JVM ``f2l`` instruction (opcode 0x8c)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x8c
    #: The JVM instruction name as appears in the specification.
    name = 'f2l'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class fadd(Instruction):
    """The JVM ``fadd`` instruction (opcode 0x62)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x62
    #: The JVM instruction name as appears in the specification.
    name = 'fadd'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class faload(Instruction):
    """The JVM ``faload`` instruction (opcode 0x30)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x30
    #: The JVM instruction name as appears in the specification.
    name = 'faload'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class fastore(Instruction):
    """The JVM ``fastore`` instruction (opcode 0x51)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x51
    #: The JVM instruction name as appears in the specification.
    name = 'fastore'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class fcmpg(Instruction):
    """The JVM ``fcmpg`` instruction (opcode 0x96)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x96
    #: The JVM instruction name as appears in the specification.
    name = 'fcmpg'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False


class fcmpl(Instruction):
    """The JVM ``fcmpl`` instruction (opcode 0x95)."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x95
    #: The
which contains user input.

    Returns
    -------
    pandas.core.frame.DataFrame
        Dataframe containing all rows from google worksheet.

    """
    # NOTE(review): this chunk begins mid-function; the enclosing "def" (which
    # binds `worksheet`) is outside this view.
    return pd.DataFrame(worksheet.get_all_records())


def update_google_worksheet(worksheet, df):
    """
    Update new user input from df to connected worksheet

    Parameters
    ----------
    worksheet : gspread.models.Worksheet
        Connected worksheet which contains user input.
    df : pandas.core.frame.DataFrame
        Dataframe to write to worksheet.

    Returns
    -------
    None.

    """
    # Get column names
    column_name = df.columns.values.tolist()
    # Get value to append to worksheet
    row_value = df.values.tolist()
    # Update worksheet: first row is the header, remaining rows are the data
    worksheet.update([column_name] + row_value)


def build_connection():
    """
    Initialize credentials object and drive_service object to interact with
    Google Drive API.
    ref: https://google-auth.readthedocs.io/en/master/_modules/google/oauth2/service_account.html#Credentials.from_service_account_info

    Info argument in credentials object is a .toml file stored in streamlit.io.
    Note that credentials object is initialized in a similar fashion to
    retrain.load_google_worksheet_from_info(). However, one stark differences is
    that the info argument here is structured in .toml file (saved in
    streamlit.io); while the latter's info argument is a constructed dict object
    with some of its values saved in Github secrets. This is because github
    secrets does not allow storage of any structured file format, therefore the
    workaround. Since app.py will be run entirely from streamlit.io, there is no
    need to store the .toml file in Github secrets.

    Returns
    -------
    credentials : google.oauth2.service_account.Credentials
        Credentials object for Google service account built using credentials
        obtained from GCP > IAM & Admin > Service Accounts > KEYS.
    drive_service : googleapiclient.discovery.Resource
        Initialized Resource to interact with Google Drive API.
        Ref: https://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.discovery-module.html

    """
    # Create a connection object from the service-account info kept in
    # Streamlit secrets; scopes grant both Sheets and Drive access.
    credentials = service_account.Credentials.from_service_account_info(
        st.secrets["gcp_service_account"],
        scopes=["https://www.googleapis.com/auth/spreadsheets",
                "https://www.googleapis.com/auth/drive"])
    drive_service = build('drive', 'v3', credentials=credentials)
    return credentials, drive_service


def get_modified_time(file_id, drive_service, server_tz, local_tz):
    """
    Get latest modified time from file stored in Google Drive folder

    Parameters
    ----------
    file_id : str
        Unique ID of file stored in Google Drive.
    drive_service : googleapiclient.discovery.Resource
        Initialized Resource to interact with Google Drive API.
    server_tz : pytz.tzfile.Etc/UTC
        timezone information for server location.
    local_tz : str
        Region for user timezone eg "Asia/Kuala_Lumpur".

    Returns
    -------
    str
        Description of last modified time.

    """
    metadata = drive_service.files().get(fileId=file_id,
                                         fields='modifiedTime').execute()
    # Convert the Drive timestamp from server timezone to the user's local one.
    mtime = pd.to_datetime(metadata['modifiedTime'],
                           format="%Y-%m-%d %H:%M").tz_convert(server_tz).tz_convert(local_tz)
    # NOTE(review): components are not zero-padded (e.g. "9:5" for 09:05);
    # confirm this display format is intended.
    return f'Last Updated at {mtime.year}-{mtime.month}-{mtime.day} {mtime.hour}:{mtime.minute}'


def connect_googlesheet(googlesheet_name, credentials):
    """
    Connect to existing Google Sheet

    Parameters
    ----------
    googlesheet_name : str
        Name of Private Google Sheet accessible by created Google service
        account.
    credentials : google.oauth2.service_account.Credentials
        Credentials object for Google service account built using credentials
        obtained from GCP > IAM & Admin > Service Accounts > KEYS.

    Returns
    -------
    worksheet : gspread.models.Worksheet
        Connected Google worksheet.
    spread : gspread_pandas.spread.Spread
        Connected Google Spreadsheet.

    """
    # Get google sheet url from secrets
    sheet_url = st.secrets["private_gsheets_url"]
    # Initialize a Client instance and authorize to access spreadsheets via
    # Google Sheets API (via OAuth2 credentials)
    # ref: https://docs.gspread.org/en/latest/api.html#gspread.authorize
    gc = gspread.authorize(credentials)
    # Open the google sheet
    sh = gc.open_by_url(sheet_url)
    # Open the worksheet
    worksheet = sh.worksheet(title=googlesheet_name)
    # Create an instance of Client class to communicate with Google API
    # ref: https://gspread-pandas.readthedocs.io/en/latest/gspread_pandas.html#gspread_pandas.client.Client
    client = Client(creds=credentials)
    # Create an instance of Spread class to interact with Google spreadsheet
    # using Pandas
    # ref: https://gspread-pandas.readthedocs.io/en/latest/gspread_pandas.html#gspread_pandas.spread.Spread
    spread = Spread(spread=sheet_url, client=client)
    return worksheet, spread


def update_googlesheet_gspread_pandas(spread, googlesheet_name, df):
    """
    Updates data in connected Google Spreadsheet

    Parameters
    ----------
    spread : gspread_pandas.spread.Spread
        Connected Google Spreadsheet.
    googlesheet_name : str
        Name of Google Worksheet.
    df : pandas.core.frame.DataFrame
        Dataframe to update to Google Worksheet.

    Returns
    -------
    None.

    """
    # Only these four columns are pushed back to the sheet.
    col = ['datetime', 'tweet', 'polarity', 'user_input_timestamp']
    spread.df_to_sheet(df[col], sheet=googlesheet_name, index=False)


def polarity_formatter(my_col):
    """
    Format polarity column to highlight row based on polarity. Rows for
    negative polarity is highlighted in red, and rows for positive polarity is
    highlighted in green

    Parameters
    ----------
    my_col : pandas.core.series.Series
        Column to be formatted.

    Returns
    -------
    bokeh.models.widgets.tables.HTMLTemplateFormatter
        HTML Template Formatter with user-defined template.

    """
    # The placeholder 'result_col' is substituted with the actual column name
    # before handing the template to bokeh.
    template = """
    <div style="background:<%=
    (function colorfromint(){
        if(result_col == 'positive')
        {return('#84ddb4')}
        else if (result_col == 'negative')
        {return('#e74d3c')}
        }()) %>;
    color: white">
    <p style="text-align:center;"> <%= value %></p>
    </div>
    """.replace('result_col', my_col)
    return HTMLTemplateFormatter(template=template)


@st.cache
def convert_df(df):
    """
    Convert pandas dataframe into csv file for user to download

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        Dataframe to converted to csv to.

    Returns
    -------
    NoneType
        Function to convert pandas dataframe to csv file.

    """
    return df.to_csv().encode('utf-8')


def get_agg_data(cust_tweets, dhl_tweets):
    """
    Aggregate Tweets data into multiindexed dataframe (datatime, week, year).
    Aggregation function used is summation.

    Parameters
    ----------
    cust_tweets : pandas.core.frame.DataFrame
        Dataframe containing Tweets from customers.
    dhl_tweets : pandas.core.frame.DataFrame
        Dataframe containing Tweets from DHL associated accounts.

    Returns
    -------
    pandas.core.frame.DataFrame
        Pivoted multiindexed pandas dataframe aggregated using summation.

    """
    # Get polarity data from cust_tweets
    # Get one-hot encoded columns for 'polarity'
    sum_polarity = pd.concat([pd.get_dummies(cust_tweets[['datetime', 'year', 'polarity']]),
                              cust_tweets[['week']]], axis=1).add_suffix('_mentions')
    # Add count for each tweets
    sum_polarity['count_cust_tweets'] = 1
    # Ensure that columns exist (only one polarity class may be present in the
    # data; the missing one-hot column is synthesized from the other)
    if 'polarity_positive_mentions' not in sum_polarity.columns:
        sum_polarity['polarity_positive_mentions'] = sum_polarity['polarity_negative_mentions'].apply(lambda x: 0 if x==1 else np.nan)
    if 'polarity_negative_mentions' not in sum_polarity.columns:
        sum_polarity['polarity_negative_mentions'] = sum_polarity['polarity_positive_mentions'].apply(lambda x: 0 if x==1 else np.nan)
    # Get engagement data from dhl_tweets
    # Slice dataframe to get only engagement details
    sum_engagement = dhl_tweets[['datetime', 'week', 'year', 'replies', 'retweets', 'likes']].copy()
    # Add count for each tweets
    sum_engagement['count_dhl_tweets'] = 1
    # Append both dfs
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
    # pd.concat([sum_engagement, sum_polarity], sort=False) is the replacement.
    sum_df = sum_engagement.append(sum_polarity, sort=False)
    # if value is na, copy 'week_mentions'
    sum_df['week'] = sum_df.apply(lambda row: np.where(pd.isna(row['week']), row['week_mentions'], row['week']), axis=1)
    # if value is na, copy 'year_mentions'
    sum_df['year'] = sum_df.apply(lambda row: np.where(pd.isna(row['year']), row['year_mentions'], row['year']), axis=1)
    sum_df['datetime'] = sum_df.apply(lambda row: np.where(pd.isna(row['datetime']), row['datetime_mentions'], row['datetime']), axis=1)
    # reset index and drop original
    sum_df = sum_df.reset_index(drop=True)
    # change unhashable np.array of dtype=object to dtype=np.int
    sum_df['datetime'] = sum_df['datetime'].apply(lambda x: x.astype(str))
    sum_df['week'] = sum_df['week'].apply(lambda x: x.astype(str))
    sum_df['year'] = sum_df['year'].apply(lambda x: x.astype(str))
    return pd.pivot_table(sum_df,
                          values=['replies', 'retweets', 'likes',
                                  'count_dhl_tweets',
                                  'polarity_negative_mentions',
                                  'polarity_positive_mentions',
                                  'count_cust_tweets'],
                          index=['datetime', 'week', 'year'],
                          aggfunc=np.sum, fill_value=0)


def plot_custom_graph(df, x, y, chart_type, agg_type):
    """
    Plot altair chart from user input

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        Pivoted (datetime, week, year) multiindexed dataframe using sum
        aggregation function.
    x : str
        X axis name from user input.
    y : list
        List of column names from user input.
    chart_type : str
        Name of chart type from user input.
    agg_type : str
        String depicting data aggregation type from user input.

    Returns
    -------
    altair.vegalite.v4.api.Chart
        Rendered chart.

    """
    width = 800
    # Map the human-readable axis choice to an altair field/encoding string.
    x_dict = {
        'Hours of the day': 'hours(datetime):T',
        'Day of the week': 'day(datetime):O',
        'Day of the month': 'date(datetime):O',
        'Week': 'week:Q',
        'Date': 'monthdate(datetime):O',
        'Month': 'month(datetime):O',
        'Quarter': 'quarter(datetime):O',
        'Year': 'year(datetime):O'
    }
    # Map the aggregation choice to an altair aggregate encoding.
    agg_type_dict = {
        'Total number of Tweets': 'sum(value):Q',
        'Average number of Tweets': 'average(value):Q',
        'Min number of Tweets': 'min(value):Q',
        'Max number of Tweets': 'max(value):Q'
    }
    if chart_type == 'Scatter':
        return alt.Chart(
            data = df,
            width = width
        ).transform_fold(
            y
        ).mark_circle().encode(
            alt.X(x_dict[x], title=x),
            alt.Y(agg_type_dict[agg_type], title='value'),
            color='key:N',
            tooltip = [alt.Tooltip(x_dict[x]),
                       alt.Tooltip('key:N'),
                       alt.Tooltip(agg_type_dict[agg_type])]
        ).configure_mark(
            strokeWidth=10
        ).interactive()
    elif chart_type == 'Line':
        return alt.Chart(
            data = df,
            width = width
        ).transform_fold(
            y
        ).mark_line().encode(
            alt.X(x_dict[x], title=x),
            alt.Y(agg_type_dict[agg_type]),
            color='key:N',
            tooltip = [alt.Tooltip(x_dict[x]),
                       alt.Tooltip('key:N'),
                       alt.Tooltip(agg_type_dict[agg_type])]
        ).configure_mark(
            strokeWidth=3
        ).interactive()
    elif chart_type == 'Area':
        return alt.Chart(
            data = df,
            width = width
        ).transform_fold(
            y
        ).mark_area().encode(
            alt.X(x_dict[x], title=x),
            alt.Y(agg_type_dict[agg_type]),
            color='key:N',
            tooltip = [alt.Tooltip(x_dict[x]),
                       alt.Tooltip('key:N'),
                       alt.Tooltip(agg_type_dict[agg_type])]
        ).configure_mark(
            strokeWidth=10
        ).interactive()
    elif chart_type == 'Bar':
        return alt.Chart(
            data = df,
            width = width
        ).transform_fold(
            y
        ).mark_bar().encode(
            alt.X(x_dict[x], title=x),
            alt.Y(agg_type_dict[agg_type]),
            color='key:N',
            tooltip = [alt.Tooltip(x_dict[x]),
                       alt.Tooltip('key:N'),
                       alt.Tooltip(agg_type_dict[agg_type])]
        ).configure_mark(
            strokeWidth=10
        ).interactive()
    elif chart_type == 'Heatmap':
        return alt.Chart(df).transform_fold(y).mark_rect().encode(
            alt.X('hours(datetime):O', title='Hours of the day'),
            alt.Y('day(datetime):O', title='Day'),
            alt.Row('key:O', title=''),
            color=agg_type_dict[agg_type],
            tooltip = [alt.Tooltip('hours(datetime):O'),
                       alt.Tooltip('day(datetime):O'),
                       alt.Tooltip('key:N'),
                       alt.Tooltip(agg_type_dict[agg_type])]
        ).properties(
            width=610,
            height=150
        )


def agg_by_period(agg_df, groupby_var):
    """
    Aggregate numerical data by `groupby_var`. Calculate percentage and
    percentage change for all KPI categories.

    Parameters
    ----------
    agg_df : pandas.core.frame.DataFrame
        Dataframe returned by `get_agg_data(cust_tweets, dhl_tweets)`.
    groupby_var : str
        Variable to aggregate the data by. One of
        ['year', 'quarter', 'month', 'week', 'day'].

    Returns
    -------
    agg_df_period : pandas.core.frame.DataFrame
        Dataframe containing numerical data agggreagated by `groupby_var`,
        within columns [`groupby_var`, 'variable', 'value', 'sum',
        'percentage', 'pct_change'].

    """
    # Transform datetime column to datetime format
    agg_df['datetime'] = pd.to_datetime(agg_df['datetime'])
    # Get respective time period from datetime column
    if groupby_var == 'quarter':
        agg_df['quarter'] = agg_df['datetime'].apply(lambda x: x.quarter)
    elif groupby_var == 'month':
        agg_df['month'] = agg_df['datetime'].apply(lambda x: x.month)
    elif groupby_var == 'day':
        agg_df['day'] = agg_df['datetime'].apply(lambda x: x.day)
    # Aggregate by `groupby_var` by summation
    agg_df_period = agg_df.melt(id_vars=groupby_var, value_vars=['Total Customer Mentions',
    # NOTE(review): function truncated here in this chunk; the rest of the
    # value_vars list and body is not visible.
from typing import Union, List, Callable, Optional

import torch
from torch import Tensor
from torch import nn
import math


class FeedForward(nn.Module):
    """
    Class for feedforward neural network model. Takes a list of pytorch tensors
    holding the weight initializations and ties these together into a trainable
    neural network.
    """

    def __init__(self, layer_weights: List[Tensor], biases: List[Tensor],
                 nonlinearities: List[Callable]):
        """
        Parameters
        ----------
        layer_weights : List[Tensor]
            List of the layer initializations.
        biases : List[Tensor]
            List of the bias initializations.
        nonlinearities : List[Callable]
            List of the nonlinearities used in the layers.
        """
        super().__init__()
        # Wrap the provided tensors as trainable parameters, one per layer.
        self.layer_weights = nn.ParameterList([nn.Parameter(layer, requires_grad=True)
                                               for layer in layer_weights])
        self.biases = nn.ParameterList([nn.Parameter(bias, requires_grad=True)
                                        for bias in biases])
        self.nonlinearities = nonlinearities

    def forward(self, inputs: Tensor):
        """Apply each layer in turn: hid -> nonlinearity(hid @ W + b)."""
        hid = inputs
        for layer, nonlinearity, bias in zip(self.layer_weights, self.nonlinearities, self.biases):
            hid = nonlinearity(hid@layer + bias)
        return hid

    def get_pre_activations(self, inputs: Tensor):
        """Return the detached pre-activation (W @ h + b) of every layer."""
        hid = inputs
        pre_activations = []
        for layer, nonlinearity, bias in zip(self.layer_weights, self.nonlinearities, self.biases):
            pre_activation = hid@layer + bias
            hid = nonlinearity(pre_activation)
            pre_activations.append(pre_activation.detach())
        return pre_activations

    def get_post_activations(self, inputs: Tensor):
        """Return the detached post-activation output of every layer."""
        hid = inputs
        post_activations = []
        for layer, nonlinearity, bias in zip(self.layer_weights, self.nonlinearities, self.biases):
            hid = nonlinearity(hid@layer + bias)
            post_activations.append(hid.detach())
        return post_activations

    def get_activations(self, inputs: Tensor):
        """Return detached pre- and post-activations, interleaved per layer."""
        hid = inputs
        activations = []
        for layer, nonlinearity, bias in zip(self.layer_weights, self.nonlinearities, self.biases):
            pre_activation = hid@layer + bias
            hid = nonlinearity(pre_activation)
            activations.append(pre_activation.detach())
            activations.append(hid.detach())
        return activations


class DenseRandomFF(FeedForward):
    """
    Feedforward net. Weights are initialized according to two factors: how
    close to an "identity" matrix the weights are, and a gain factor. Biases
    are initialized to zero.
    """

    def __init__(self, input_dim: int, hidden_dims: Union[int, List[int]], output_dim: int, num_layers: int,
                 pert_factor: float = 1., gain_factor: float = 0.4,
                 nonlinearity: Union[str, Callable] = 'relu', normalize: bool = True):
        # Resolve the nonlinearity: accept a callable directly, or the
        # strings 'tanh'/'Tanh' and 'relu'/'ReLU'.
        if isinstance(nonlinearity, Callable):
            self.nonlinearity = nonlinearity
        elif isinstance(nonlinearity, str):
            if nonlinearity == 'tanh' or nonlinearity == 'Tanh':
                self.nonlinearity = torch.tanh
            elif nonlinearity == 'relu' or nonlinearity == 'ReLU':
                def relu(x):
                    return torch.clamp(x, min=0)
                self.nonlinearity = relu
            else:
                raise AttributeError("nonlinearity not recognized.")
        else:
            raise AttributeError("nonlinearity not recognized.")
        layer_weights = []
        nonlinearities = []
        biases = []
        # A scalar hidden_dims is broadcast to one entry per layer.
        if not hasattr(hidden_dims, '__len__'):
            N = hidden_dims
            hidden_dims = [N for x in range(num_layers)]
        else:
            if len(hidden_dims) != num_layers:
                raise ValueError("Length of hidden_dims does not match num_layers")
        # input weights: interpolate between identity and scaled Gaussian
        input_w_id = torch.eye(input_dim, hidden_dims[0])
        if normalize:
            input_w_random = gain_factor*torch.randn(input_dim, hidden_dims[0])/math.sqrt(input_dim)
        else:
            input_w_random = gain_factor*torch.randn(input_dim, hidden_dims[0])
        input_w = (1 - pert_factor)*input_w_id + pert_factor*input_w_random
        layer_weights.append(input_w)
        nonlinearities.append(self.nonlinearity)
        biases.append(torch.zeros(hidden_dims[0]))
        # hidden layer weights
        for i0 in range(num_layers - 1):
            # NOTE(review): the identity part uses hidden_dims[0]/hidden_dims[1]
            # instead of hidden_dims[i0]/hidden_dims[i0 + 1]; this looks like a
            # bug when hidden layer sizes differ — confirm against the random
            # part two lines below, which does index by i0.
            hidden_w_id = torch.eye(hidden_dims[0], hidden_dims[1])
            if normalize:
                hidden_w_random = gain_factor*torch.randn(hidden_dims[i0], hidden_dims[i0 + 1])/math.sqrt(
                    hidden_dims[i0])
            else:
                hidden_w_random = gain_factor*torch.randn(hidden_dims[i0], hidden_dims[i0 + 1])
            hidden_w = (1 - pert_factor)*hidden_w_id + pert_factor*hidden_w_random
            layer_weights.append(hidden_w)
            nonlinearities.append(self.nonlinearity)
            biases.append(torch.zeros(hidden_dims[i0 + 1]))
        # output layer weights
        output_w_id = torch.eye(hidden_dims[-1], output_dim)
        if normalize:
            output_w_random = gain_factor*torch.randn(hidden_dims[-1], output_dim)/math.sqrt(hidden_dims[-1])
        else:
            output_w_random = gain_factor*torch.randn(hidden_dims[-1], output_dim)
        output_w = (1 - pert_factor)*output_w_id + pert_factor*output_w_random
        layer_weights.append(output_w)
        nonlinearities.append(self.nonlinearity)
        biases.append(torch.zeros(output_dim))
        super().__init__(layer_weights, biases, nonlinearities)


# noinspection PyArgumentList
class RNN(nn.Module):
    """
    Recurrent Neural Network (RNN).

    This is a "vanilla" implementation with the typical machine-learning style
    equations:
    h_{t+1} = nonlinearity(h_{t} @ recurrent_weights + recurrent_bias)
        -- hidden unit update
    """

    def __init__(self, input_weights: Tensor, recurrent_weights: Tensor, output_weights: Tensor,
                 recurrent_bias: Tensor, output_bias: Tensor,
                 nonlinearity: Optional[Union[str, Callable]],
                 hidden_unit_init: Optional[Union[str, Callable]] = None,
                 train_input: bool = False, train_recurrent: bool = True, train_output: bool = True,
                 train_recurrent_bias: bool = True, train_output_bias: bool = True,
                 output_over_recurrent_time: bool = False):
        """
        Parameters
        ----------
        input_weights : Tensor
            Input weight initialization.
        recurrent_weights : Tensor
            Recurrent weight initialization.
        output_weights : Tensor
            Output weight initialization.
        recurrent_bias : Tensor
            Recurrent bias vector initialization.
        output_bias : Tensor
            Output bias vector initialization.
        nonlinearity : Optional[Union[str, Callable]]
            The nonlinearity to use for the hidden unit activation function.
        hidden_unit_init : Optional[Union[str, Callable]]
            Initial value for the hidden units. The network is set to this
            value at the beginning of every input batch. Todo: make it so the
            hidden state can carry over input batches.
        train_input : bool
            True: train the input weights, i.e. set requires_grad = True for
            the input weights. False: keep the input weights fixed to their
            initial value over training.
        train_recurrent : bool
            True: train the recurrent weights. False: keep the recurrent
            weights fixed to their initial value over training.
        train_output : bool
            True: train the output weights. False: keep the output weights
            fixed to their initial value over training.
        train_recurrent_bias : bool
            True: train the recurrent bias. False: keep the recurrent bias
            fixed to its initial value over training.
        train_output_bias : bool
            True: train the output bias. False: keep the output bias fixed to
            its initial value over training.
        output_over_recurrent_time : bool
            True: Return network output over the recurrent timesteps.
            False: Only return the network output at the last timestep.
        """
        super().__init__()
        # Resolve the nonlinearity, same convention as DenseRandomFF.
        if isinstance(nonlinearity, Callable):
            self.nonlinearity = nonlinearity
        elif isinstance(nonlinearity, str):
            if nonlinearity == 'tanh' or nonlinearity == 'Tanh':
                self.nonlinearity = torch.tanh
            elif nonlinearity == 'relu' or nonlinearity == 'ReLU':
                def relu(x):
                    return torch.clamp(x, min=0)
                self.nonlinearity = relu
            else:
                raise AttributeError("nonlinearity not recognized.")
        else:
            raise AttributeError("nonlinearity not recognized.")
        # Hidden state reset value: zeros by default, or a caller-given tensor.
        if hidden_unit_init is None:
            self.hidden_unit_init = torch.zeros(recurrent_weights.shape[0])
        elif isinstance(hidden_unit_init, Tensor):
            self.hidden_unit_init = hidden_unit_init.clone()
        else:
            raise AttributeError("hidden_unit_init option not recognized.")
        # Each weight/bias is registered as a Parameter; the train_* flags
        # only toggle requires_grad.
        if train_input:
            self.Win = nn.Parameter(input_weights.clone(), requires_grad=True)
        else:
            self.Win = nn.Parameter(input_weights.clone(), requires_grad=False)
        if train_recurrent:
            self.Wrec = nn.Parameter(recurrent_weights.clone(), requires_grad=True)
        else:
            self.Wrec = nn.Parameter(recurrent_weights.clone(), requires_grad=False)
        if train_output:
            self.Wout = nn.Parameter(output_weights.clone(), requires_grad=True)
        else:
            self.Wout = nn.Parameter(output_weights.clone(), requires_grad=False)
        if train_recurrent_bias:
            self.brec = nn.Parameter(recurrent_bias.clone(), requires_grad=True)
        else:
            self.brec = nn.Parameter(recurrent_bias.clone(), requires_grad=False)
        if train_output_bias:
            self.bout = nn.Parameter(output_bias.clone(), requires_grad=True)
        else:
            self.bout = nn.Parameter(output_bias.clone(), requires_grad=False)
        self.output_over_recurrent_time = output_over_recurrent_time

    def forward(self, inputs: Tensor):
        # assumes inputs is (batch, time, input_dim) — indexing inputs[:, i0]
        # below fixes the time axis to dim 1; TODO confirm.
        hid = self.hidden_unit_init
        if self.output_over_recurrent_time:
            # out = [hid]
            # NOTE(review): torch.zeros inherits the default dtype/device; if
            # inputs live on GPU this would need device=inputs.device — confirm.
            out = torch.zeros(inputs.shape[0], inputs.shape[1], self.Wout.shape[-1])
            for i0 in range(inputs.shape[1]):
                preactivation = hid@self.Wrec + inputs[:, i0]@self.Win + self.brec
                hid = self.nonlinearity(preactivation)
                # out.append(hid@self.Wout + self.bout)
                out[:, i0] = hid@self.Wout + self.bout
            return out
        else:
            for i0 in range(inputs.shape[1]):
                preactivation = hid@self.Wrec + inputs[:, i0]@self.Win + self.brec
                hid = self.nonlinearity(preactivation)
            out = hid@self.Wout + self.bout
            return out

    def get_pre_activations(self, inputs: Tensor):
        """Detached recurrent pre-activations per timestep, plus final output."""
        hid = self.hidden_unit_init
        preactivations = []
        for i0 in range(inputs.shape[1]):
            preactivation = hid@self.Wrec + inputs[:, i0]@self.Win + self.brec
            hid = self.nonlinearity(preactivation)
            preactivations.append(preactivation.detach())
        out = hid@self.Wout + self.bout
        preactivations.append(out.detach())
        return preactivations

    def get_post_activations(self, inputs: Tensor):
        """Detached hidden states per timestep, plus final output."""
        hid = self.hidden_unit_init
        postactivations = []
        for i0 in range(inputs.shape[1]):
            preactivation = hid@self.Wrec + inputs[:, i0]@self.Win + self.brec
            hid = self.nonlinearity(preactivation)
            postactivations.append(hid.detach())
        out = hid@self.Wout + self.bout
        postactivations.append(out.detach())
        return postactivations

    def get_activations(self, inputs: Tensor):
        """Detached pre- and post-activations per timestep, plus final output."""
        hid = self.hidden_unit_init
        activations = []
        for i0 in range(inputs.shape[1]):
            preactivation = hid@self.Wrec + inputs[:, i0]@self.Win + self.brec
            hid = self.nonlinearity(preactivation)
            activations.append(preactivation.detach())
            activations.append(hid.detach())
        out = hid@self.Wout + self.bout
        activations.append(out.detach())
        return activations


class StaticInputRNN(RNN):
    """RNN variant that applies a static input once, then recurs freely."""

    def forward(self, inputs: Tensor, num_recurrent_steps):
        hid = self.hidden_unit_init
        # The input drives only the first step ...
        preactivation = hid@self.Wrec + inputs@self.Win + self.brec
        hid = self.nonlinearity(preactivation)
        # ... the remaining steps run on recurrent dynamics alone.
        for i0 in range(num_recurrent_steps - 1):
            preactivation = hid@self.Wrec + self.brec
            hid = self.nonlinearity(preactivation)
        out = hid@self.Wout + self.bout
        return out

    def get_pre_activations(self, inputs: Tensor, num_recurrent_steps):
        hid = self.hidden_unit_init
        preactivations = []
        preactivation = hid@self.Wrec + inputs@self.Win + self.brec
        hid = self.nonlinearity(preactivation)
        preactivations.append(preactivation.detach())
        for i0 in range(num_recurrent_steps - 1):
            preactivation = hid@self.Wrec + self.brec
            hid = self.nonlinearity(preactivation)
            preactivations.append(preactivation.detach())
        out = hid@self.Wout + self.bout
        preactivations.append(out.detach())
        return preactivations

    # NOTE(review): singular name breaks the get_post_activations naming
    # convention used by FeedForward and RNN — confirm whether callers rely
    # on this exact name.
    def get_post_activation(self, inputs: Tensor, num_recurrent_steps):
        hid = self.hidden_unit_init
        postactivations = []
        preactivation = hid@self.Wrec + inputs@self.Win + self.brec
        hid = self.nonlinearity(preactivation)
        postactivations.append(hid.detach())
        for i0 in range(num_recurrent_steps - 1):
            preactivation = hid@self.Wrec + self.brec
            hid = self.nonlinearity(preactivation)
            postactivations.append(hid.detach())
        out = hid@self.Wout + self.bout
        postactivations.append(out.detach())
        return postactivations

    def get_activations(self, inputs: Tensor, num_recurrent_steps):
        hid = self.hidden_unit_init
        activations = []
        preactivation = hid@self.Wrec + inputs@self.Win + self.brec
        hid = self.nonlinearity(preactivation)
        activations.append(preactivation.detach())
        activations.append(hid.detach())
        for i0 in range(num_recurrent_steps - 1):
            preactivation = hid@self.Wrec + self.brec
            hid = self.nonlinearity(preactivation)
            activations.append(preactivation.detach())
            activations.append(hid.detach())
        out = hid@self.Wout + self.bout
        activations.append(out.detach())
        return activations


class SompolinskyRNN(RNN):
    """
    Recurrent Neural Network (RNN) with Haim Sompolinsky style dynamics:
    h' = -h + nonlinearity(h)@Wrec + input@Win + recurrent_bias.
    These are discretized via forward Euler method to get the update
    h_{t+1} = h_{t} + dt(-h_{t} + nonlinearity(h_{t}) @ Wrec + input_{t}@Win
    + recurrent_bias)
    Here h is like a current input (membrane potential) and nonlinearity(h_{t})
    is like a "firing rate".
    """

    def __init__(self, input_weights: Tensor,
    # NOTE(review): class truncated here in this chunk; the rest of the
    # __init__ signature and body is not visible.
1 for i in self.complex_matrix[self.species.index(s),:]) and s not in sink and s not in source for s in species_list)
    # NOTE(review): this chunk begins mid-expression; the enclosing method and
    # class header are outside this view. The methods below are class-interior
    # (they take `self`).

    def _intermediate_species(self):
        """Indices of species that are not sink or source species."""
        source = self._source_species()
        sink = self._sink_species()
        return [s for s in range(self.n_species) if s not in source and s not in sink]

    def is_intermediate_species(self, s):
        """Check if s is an intermediate species."""
        return (not self.is_source_species(s)) and (not self.is_sink_species(s))

    @property
    def intermediate_species(self):
        """Species that are not sink or source species."""
        return [self.species[s] for s in self._intermediate_species()]

    def _intermediate_stoich_1_species(self):
        """Indices of intermediate species that never appear with
        stoichiometry greater than one."""
        intermediates = self._intermediate_species()
        return [s for s in self._stoich_1_species() if s in intermediates]

    @property
    def intermediate_stoich_1_species(self):
        """Intermediate species that never appear with stoichiometry greater
        than one."""
        return [self.species[s] for s in self._intermediate_stoich_1_species()]

    def _intermediate_simple_complexes(self):
        """Indices of simple intermediate complexes."""
        intermediates = self._intermediate_complexes()
        simple = self._simple_complexes()
        return [c for c in intermediates if c in simple]

    @property
    def intermediate_simple_complexes(self):
        """Simple intermediate complexes."""
        return [self.complexes[c] for c in self._intermediate_simple_complexes()]

    ### Reduction ###

    def remove(self, rapid_eq = None, qss = None, cons_law = None, minimal = False, remove_const = False, \
               merge_reacts = False, adjust = False, debug = False, network_file = None):
        """Remove intermediates either by rapid-equilibrium or by
        quasi-steady state approximation, with an optional conservation law.

        :param rapid_eq: list of pairs species-complexes. The species are to be
                         replaced by complexes using rapid equilibrium
                         approximation.
        :type rapid_eq: list of (string (species), string (complex)).
        :param qss: list of species to remove via qssa.
        :type qss: list of strings
        :param cons_law: remove a species using a conservation. All other
                         species involved in in the conservation are eliminated
                         first, via quasi-steady state approximation, if not
                         listed in *rapid_eq*.
        :type cons_law: (string, ConsLaw)
        :param minimal: find network of minimal structure when applying qss.
        :type minimal: boolean
        :param remove_const: remove any species left constant after the
                             reduction.
        :type remove_const: boolean
        :param merge_reacts: merge reactions with the same reactant and
                             product.
        :type merge_reacts: boolean
        :param adjust: change the rates so that they reflect the reactant
                       species.
        :type adjust: boolean
        :param network_file: save the reduction steps to file with the given
                             path.
        :type network_file: string

        Update remove_species.

        :Example:

        >>> from crnpy.crn import CRN, from_react_strings
        >>> net = from_react_strings(["s + e (k_1)<->(k1) es", "es ->(k2) e + p", "i + e (k_3)<->(k3) ei", "i + es (k_3)<->(k3) esi", "s + ei (k_1)<->(k1) esi"])
        >>> cl = ('e', ConsLaw('e + ei + es + esi', 'et'))
        >>> net.remove(rapid_eq = [('ei', 'e + i'), ('esi', 'e + s + i'), ('es', 's + e')], cons_law = cl)
        >>> net.reactions
        (r1: s ->(et*k1*k2*k_3/(i*k1*k3*s + i*k3*k_1 + k1*k_3*s + k_1*k_3)) p,)
        """
        # Species (other than the conserved one) appearing in the conservation
        # law must be eliminated first.
        if cons_law:
            add_species = [str(c) for c in cons_law[1].species.keys() if c != sp.Symbol(cons_law[0])]
        else:
            add_species = []
        if rapid_eq != None:
            rapid_eq_species = [pair[0] for pair in rapid_eq]
        else:
            rapid_eq_species, rapid_eq = [], []
        if qss != None:
            qss_species = qss + [a for a in add_species if a not in rapid_eq_species and a not in qss]
        else:
            qss_species = [a for a in add_species if a not in rapid_eq_species]
        if network_file:
            self.save_to_file(network_file, overwrite = 'w', log = "Original network")
        # rapid-equilibrium
        for pair in rapid_eq:
            self._rapid_eq(pair, debug)
            if network_file:
                self.save_to_file(network_file, log = "Rapid eq. on {}, {}".format(pair[0], pair[1]))
        if debug:
            print("removed_species after rapid_eq: {}".format(self.removed_species))
        # qss
        self._qss(qss_species, minimal = minimal, network_file = network_file, debug = debug)
        if debug:
            print("removed_species after QSS: {}".format(self.removed_species))
        # use conservation law to rewrite cons_law[0] in terms of remaining
        # variables
        if cons_law:
            self.remove_by_cons(cons_law[0], cons_law[1], debug)
        # optional changes
        if remove_const:
            self.remove_all_constants()
            if network_file:
                self.save_to_file(network_file, log = "Removed constants")
        if merge_reacts:
            self.merge_reactions()
            if network_file:
                self.save_to_file(network_file, log = "Merged reactions")
        if adjust:
            self._fix_ma()
        if network_file:
            self.save_to_file(network_file, rs = True, log = "Final network")

    def remove_by_cons(self, species, cons_law, debug = False):
        """Remove a species using a conservation law.
        First replace removed_species in the conservation law with their
        expression. Then use the conservation expression to write the species
        concentration as function of the remaining species.

        :Example:

        >>> from crnpy.crn import CRN, from_react_strings
        >>> net = from_react_strings(["E + S (k_1)<->(k1) C", "C ->(k2) E + P"])
        >>> net.qss("C")
        >>> net.reactions
        (r0_r1: E + S ->(k1*k2/(k2 + k_1)) E + P,)
        >>> net.removed_species
        (('C', E*S*k1/(k2 + k_1)),)
        >>> cl = ConsLaw("E + C", "etot")
        >>> net.remove_by_cons("E", cl)
        >>> net.reactions
        (r0_r1: S ->(etot*k1*k2/(S*k1 + k2 + k_1)) P,)

        References:

        Tonello et al. (2016), On the elimination of intermediate species in
        chemical reaction networks.
        """
        conservation = cons_law.expression
        if debug:
            print("Removed species: {}".format(self.removed_species))
            print("Conservation: {}".format(conservation))
        # Substitute each already-removed species by its expression so the
        # conservation involves only surviving variables (plus `species`).
        for variable, expr in self.removed_species:
            if debug:
                print("Replacing {} with {}".format(variable, expr))
            conservation = conservation.subs(variable, expr)
            if debug:
                print("Found {}".format(conservation))
                print
        # The next is quicker, but not always applicable
        #conservation = (conservation / sp.Symbol(species)).cancel()
        #exp = cons_law.constant / conservation
        exp = sp.solve(conservation - cons_law.constant, sp.Symbol(species))[0]
        # remove species
        self.remove_constant(species, expr = exp)
        if debug:
            print("Remove by Conservation: added to removed_species {}".format(self.removed_species))

    def qss(self, intermediate = None, cons_law = None, minimal = False, remove_const = False, \
            merge_reacts = False, adjust = False, debug = False, network_file = None):
        """Remove an intermediate species via quasi-steady state
        approximation.

        Keyword arguments:

        :param intermediate: species to remove via qssa.
        :type intermediate: string
        :param cons_law: remove a species using a conservation. All other
                         species involved in in the conservation are eliminated
                         first, via quasi-steady state approximation.
        :type cons_law: (string, ConsLaw)
        :param minimal: find network of minimal structure when applying qss.
        :type minimal: boolean
        :param remove_const: remove any species left constant after the
                             reduction.
        :type remove_const: boolean
        :param merge_reacts: merge reactions with the same reactant and
                             product.
        :type merge_reacts: boolean
        :param adjust: change the rates so that they reflect the reactant
                       species.
        :type adjust: boolean
        :param network_file: save the reduction steps to file with the given
                             path.
        :type network_file: string

        :Example:

        >>> from crnpy.crn import CRN, from_react_strings
        >>> net = from_react_strings(["E + S (k_1)<->(k1) C", "C ->(k2) E + P"])
        >>> net.qss("C")
        >>> net.reactions
        (r0_r1: E + S ->(k1*k2/(k2 + k_1)) E + P,)
        >>> net.removed_species
        (('C', E*S*k1/(k2 + k_1)),)

        >>> net = from_react_strings(["E + S (k_1)<->(k1) C", "C ->(k2) E + P"])
        >>> net.qss("C", cons_law = ("E", ConsLaw("E + C", "etot")))
        >>> net.reactions
        (r0_r1: S ->(etot*k1*k2/(S*k1 + k2 + k_1)) P,)

        Update removed_species.

        References:

        Tonello et al. (2016), On the elimination of intermediate species in
        chemical reaction networks.

        Madelaine et al. (2016), Normalizing Chemical Reaction Networks by
        Confluent Structural Simplification (CMSB).
        """
        # Thin wrapper over remove(): a single intermediate via QSS only.
        return self.remove(qss = ([intermediate] if intermediate else None),
                           cons_law = cons_law, adjust = adjust, minimal = minimal, debug = debug, \
                           network_file = network_file, remove_const = remove_const, merge_reacts = merge_reacts)

    def _qss(self, intermediates, minimal = False, error_if_missing = True, network_file = None, debug = False):
        """Eliminate the intermediates via quasi-steady state approximation."""
        if debug:
            print("Intermediates to remove: {}".format(intermediates))
        if minimal:
            fvs = dict((self.reactions[r].reactionid, \
                        [0 if i != r else 1 for i in range(self.n_reactions)]) \
                       for r in range(self.n_reactions)) # flux vectors
            nr = self.n_reactions # flux vector length
        reactions = self.reactions
        for intermediate in intermediates:
            # if intermediate not in species, raise error or warning
            if intermediate not in self.species:
                if error_if_missing:
                    raise ValueError("Species {} not in network.".format(intermediate))
                else:
                    warnings.warn("Species {} not in network.".format(intermediate))
                    return
            if debug:
                print("Removing: {}".format(intermediate))
            # Error if species has stoichiometry greater than 1
            if any(s > 1 for s in self.complex_matrix[self.species.index(intermediate), :]):
                raise ValueError("Species {} appears with stoichiometry greater than 1.".format(intermediate))
            # Error if species has a non linear derivative
            hasLinearDyn = self.has_linear_equation(intermediate)
            if not hasLinearDyn:
                raise ValueError("Species {} has nonlinear kinetics.".format(intermediate))
            gens = [] # generators
            newreactions = []
            reactReactions = []
            prodReactions = []
            # case of intermediate in both reactant and product:
            # move intermediate to rate
            for r in reactions:
                r.remove_react_prod(intermediate)
            # separate reactions into reactions producing intermediate
            # and reactions consuming the intermediate
            for r in range(len(reactions)):
                if intermediate in
                # NOTE(review): method truncated here in this chunk; the rest
                # of the loop body is not visible.
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests the filesystem backend store""" import errno import hashlib import json import os import stat from unittest import mock import uuid import fixtures from oslo_utils.secretutils import md5 from oslo_utils import units import six from six.moves import builtins # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance_store._drivers import filesystem from glance_store import exceptions from glance_store import location from glance_store.tests import base from glance_store.tests.unit import test_store_capabilities class TestStore(base.StoreBaseTest, test_store_capabilities.TestStoreCapabilitiesChecking): def setUp(self): """Establish a clean test environment.""" super(TestStore, self).setUp() self.store = filesystem.Store(self.conf) self.config(filesystem_store_datadir=self.test_dir, filesystem_store_chunk_size=10, stores=['glance.store.filesystem.Store'], group="glance_store") self.store.configure() self.register_store_schemes(self.store, 'file') self.hash_algo = 'sha256' def _create_metadata_json_file(self, metadata): expected_image_id = str(uuid.uuid4()) jsonfilename = os.path.join(self.test_dir, "storage_metadata.%s" % expected_image_id) self.config(filesystem_store_metadata_file=jsonfilename, group="glance_store") with open(jsonfilename, 'w') as fptr: json.dump(metadata, fptr) def _store_image(self, in_metadata): 
expected_image_id = str(uuid.uuid4()) expected_file_size = 10 expected_file_contents = b"*" * expected_file_size image_file = six.BytesIO(expected_file_contents) self.store.FILESYSTEM_STORE_METADATA = in_metadata return self.store.add(expected_image_id, image_file, expected_file_size, self.hash_algo) def test_get(self): """Test a "normal" retrieval of an image in chunks.""" # First add an image... image_id = str(uuid.uuid4()) file_contents = b"chunk00000remainder" image_file = six.BytesIO(file_contents) loc, size, checksum, multihash, _ = self.store.add( image_id, image_file, len(file_contents), self.hash_algo) # Now read it back... uri = "file:///%s/%s" % (self.test_dir, image_id) loc = location.get_location_from_uri(uri, conf=self.conf) (image_file, image_size) = self.store.get(loc) expected_data = b"chunk00000remainder" expected_num_chunks = 2 data = b"" num_chunks = 0 for chunk in image_file: num_chunks += 1 data += chunk self.assertEqual(expected_data, data) self.assertEqual(expected_num_chunks, num_chunks) def test_get_random_access(self): """Test a "normal" retrieval of an image in chunks.""" # First add an image... image_id = str(uuid.uuid4()) file_contents = b"chunk00000remainder" image_file = six.BytesIO(file_contents) loc, size, checksum, multihash, _ = self.store.add( image_id, image_file, len(file_contents), self.hash_algo) # Now read it back... 
uri = "file:///%s/%s" % (self.test_dir, image_id) loc = location.get_location_from_uri(uri, conf=self.conf) data = b"" for offset in range(len(file_contents)): (image_file, image_size) = self.store.get(loc, offset=offset, chunk_size=1) for chunk in image_file: data += chunk self.assertEqual(file_contents, data) data = b"" chunk_size = 5 (image_file, image_size) = self.store.get(loc, offset=chunk_size, chunk_size=chunk_size) for chunk in image_file: data += chunk self.assertEqual(b'00000', data) self.assertEqual(chunk_size, image_size) def test_get_non_existing(self): """ Test that trying to retrieve a file that doesn't exist raises an error """ loc = location.get_location_from_uri( "file:///%s/non-existing" % self.test_dir, conf=self.conf) self.assertRaises(exceptions.NotFound, self.store.get, loc) def _do_test_add(self, enable_thin_provisoning): """Test that we can add an image via the filesystem backend.""" self.config(filesystem_store_chunk_size=units.Ki, filesystem_thin_provisioning=enable_thin_provisoning, group='glance_store') self.store.configure() filesystem.ChunkedFile.CHUNKSIZE = units.Ki expected_image_id = str(uuid.uuid4()) expected_file_size = 5 * units.Ki # 5K expected_file_contents = b"*" * expected_file_size expected_checksum = md5(expected_file_contents, usedforsecurity=False).hexdigest() expected_multihash = hashlib.sha256(expected_file_contents).hexdigest() expected_location = "file://%s/%s" % (self.test_dir, expected_image_id) image_file = six.BytesIO(expected_file_contents) loc, size, checksum, multihash, _ = self.store.add( expected_image_id, image_file, expected_file_size, self.hash_algo) self.assertEqual(expected_location, loc) self.assertEqual(expected_file_size, size) self.assertEqual(expected_checksum, checksum) self.assertEqual(expected_multihash, multihash) uri = "file:///%s/%s" % (self.test_dir, expected_image_id) loc = location.get_location_from_uri(uri, conf=self.conf) (new_image_file, new_image_size) = self.store.get(loc) 
new_image_contents = b"" new_image_file_size = 0 for chunk in new_image_file: new_image_file_size += len(chunk) new_image_contents += chunk self.assertEqual(expected_file_contents, new_image_contents) self.assertEqual(expected_file_size, new_image_file_size) def test_thin_provisioning_is_disabled_by_default(self): self.assertEqual(self.store.thin_provisioning, False) def test_add_with_thick_provisioning(self): self._do_test_add(enable_thin_provisoning=False) def test_add_with_thin_provisioning(self): self._do_test_add(enable_thin_provisoning=True) def test_add_thick_provisioning_with_holes_in_file(self): """ Tests that a file which contains null bytes chunks is fully written with a thick provisioning configuration. """ chunk_size = units.Ki # 1K content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, False) def test_add_thin_provisioning_with_holes_in_file(self): """ Tests that a file which contains null bytes chunks is sparsified with a thin provisioning configuration. """ chunk_size = units.Ki # 1K content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size self._do_test_thin_provisioning(content, 3 * chunk_size, 1, 2, True) def test_add_thick_provisioning_without_holes_in_file(self): """ Tests that a file which not contain null bytes chunks is fully written with a thick provisioning configuration. """ chunk_size = units.Ki # 1K content = b"*" * 3 * chunk_size self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, False) def test_add_thin_provisioning_without_holes_in_file(self): """ Tests that a file which not contain null bytes chunks is fully written with a thin provisioning configuration. 
""" chunk_size = units.Ki # 1K content = b"*" * 3 * chunk_size self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, True) def test_add_thick_provisioning_with_partial_holes_in_file(self): """ Tests that a file which contains null bytes not aligned with chunk size is fully written with a thick provisioning configuration. """ chunk_size = units.Ki # 1K my_chunk = int(chunk_size * 1.5) content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk self._do_test_thin_provisioning(content, 3 * my_chunk, 0, 5, False) def test_add_thin_provisioning_with_partial_holes_in_file(self): """ Tests that a file which contains null bytes not aligned with chunk size is sparsified with a thin provisioning configuration. """ chunk_size = units.Ki # 1K my_chunk = int(chunk_size * 1.5) content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk self._do_test_thin_provisioning(content, 3 * my_chunk, 1, 4, True) def _do_test_thin_provisioning(self, content, size, truncate, write, thin): self.config(filesystem_store_chunk_size=units.Ki, filesystem_thin_provisioning=thin, group='glance_store') self.store.configure() image_file = six.BytesIO(content) image_id = str(uuid.uuid4()) with mock.patch.object(builtins, 'open') as popen: self.store.add(image_id, image_file, size, self.hash_algo) write_count = popen.return_value.__enter__().write.call_count truncate_count = popen.return_value.__enter__().truncate.call_count self.assertEqual(write_count, write) self.assertEqual(truncate_count, truncate) def test_add_with_verifier(self): """Test that 'verifier.update' is called when verifier is provided.""" verifier = mock.MagicMock(name='mock_verifier') self.config(filesystem_store_chunk_size=units.Ki, group='glance_store') self.store.configure() image_id = str(uuid.uuid4()) file_size = units.Ki # 1K file_contents = b"*" * file_size image_file = six.BytesIO(file_contents) self.store.add(image_id, image_file, file_size, self.hash_algo, verifier=verifier) 
verifier.update.assert_called_with(file_contents) def test_add_check_metadata_with_invalid_mountpoint_location(self): in_metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'}] location, size, checksum, multihash, metadata = self._store_image( in_metadata) self.assertEqual({}, metadata) def test_add_check_metadata_list_with_invalid_mountpoint_locations(self): in_metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'}, {'id': 'xyz1234', 'mountpoint': '/pqr/images'}] location, size, checksum, multihash, metadata = self._store_image( in_metadata) self.assertEqual({}, metadata) def test_add_check_metadata_list_with_valid_mountpoint_locations(self): in_metadata = [{'id': 'abcdefg', 'mountpoint': '/tmp'}, {'id': 'xyz1234', 'mountpoint': '/xyz'}] location, size, checksum, multihash, metadata = self._store_image( in_metadata) self.assertEqual(in_metadata[0], metadata) def test_add_check_metadata_bad_nosuch_file(self): expected_image_id = str(uuid.uuid4()) jsonfilename = os.path.join(self.test_dir, "storage_metadata.%s" % expected_image_id) self.config(filesystem_store_metadata_file=jsonfilename, group="glance_store") expected_file_size = 10 expected_file_contents = b"*" * expected_file_size image_file = six.BytesIO(expected_file_contents) location, size, checksum, multihash, metadata = self.store.add( expected_image_id, image_file, expected_file_size, self.hash_algo) self.assertEqual(metadata, {}) def test_add_already_existing(self): """ Tests that adding an image with an existing identifier raises an appropriate exception """ filesystem.ChunkedFile.CHUNKSIZE = units.Ki image_id = str(uuid.uuid4()) file_size = 5 * units.Ki # 5K file_contents = b"*" * file_size image_file = six.BytesIO(file_contents) location, size, checksum, multihash, _ = self.store.add( image_id, image_file, file_size, self.hash_algo) image_file = six.BytesIO(b"nevergonnamakeit") self.assertRaises(exceptions.Duplicate, self.store.add, image_id, image_file, 0, self.hash_algo) def 
_do_test_add_write_failure(self, errno, exception): filesystem.ChunkedFile.CHUNKSIZE = units.Ki image_id = str(uuid.uuid4()) file_size = 5 * units.Ki # 5K file_contents = b"*" * file_size path = os.path.join(self.test_dir, image_id) image_file = six.BytesIO(file_contents) with mock.patch.object(builtins, 'open') as popen: e = IOError() e.errno = errno popen.side_effect = e self.assertRaises(exception, self.store.add, image_id, image_file, 0, self.hash_algo) self.assertFalse(os.path.exists(path)) def test_add_storage_full(self): """ Tests that adding an image without enough space on disk raises an appropriate exception """ self._do_test_add_write_failure(errno.ENOSPC, exceptions.StorageFull) def test_add_file_too_big(self): """ Tests that adding an excessively large image file raises an appropriate exception """ self._do_test_add_write_failure(errno.EFBIG, exceptions.StorageFull) def test_add_storage_write_denied(self): """ Tests that adding an image with insufficient filestore permissions raises an appropriate exception """ self._do_test_add_write_failure(errno.EACCES, exceptions.StorageWriteDenied) def test_add_other_failure(self): """ Tests that a non-space-related IOError does not raise a StorageFull exceptions. """ self._do_test_add_write_failure(errno.ENOTDIR, IOError) def test_add_cleanup_on_read_failure(self): """ Tests the partial image file is cleaned up after a read failure. 
""" filesystem.ChunkedFile.CHUNKSIZE = units.Ki image_id = str(uuid.uuid4()) file_size = 5 * units.Ki # 5K file_contents = b"*" * file_size path = os.path.join(self.test_dir, image_id) image_file = six.BytesIO(file_contents) def fake_Error(size): raise AttributeError() with mock.patch.object(image_file, 'read') as mock_read: mock_read.side_effect = fake_Error self.assertRaises(AttributeError, self.store.add, image_id, image_file, 0, self.hash_algo) self.assertFalse(os.path.exists(path)) def test_delete(self): """ Test we can delete an existing image in the filesystem store """ # First add an image image_id = str(uuid.uuid4()) file_size = 5 * units.Ki # 5K file_contents = b"*" * file_size image_file = six.BytesIO(file_contents) loc, size, checksum, multihash, _ = self.store.add( image_id, image_file, file_size, self.hash_algo) # Now check that we can delete
<gh_stars>10-100 """Straw module Straw enables programmatic access to .hic files. .hic files store the contact matrices from Hi-C experiments and the normalization and expected vectors, along with meta-data in the header. The main function, straw, takes in the normalization, the filename or URL, chromosome1 (and optional range), chromosome2 (and optional range), whether the bins desired are fragment or base pair delimited, and bin size. It then reads the header, follows the various pointers to the desired matrix and normalization vector, and stores as [x, y, count] Usage: straw <NONE/VC/VC_SQRT/KR> <hicFile(s)> <chr1>[:x1:x2] <chr2>[:y1:y2] <\ BP/FRAG> <binsize> See https://github.com/theaidenlab/straw/wiki/Python for more documentation """ from __future__ import absolute_import, division, print_function # unicode_literals __author__ = "<NAME> and <NAME>" __license__ = "MIT" import sys import struct import zlib import requests import io blockMap = dict() # global version version=0 def __readcstr(f): """ Helper function for reading in C-style string from file """ buf = b"" while True: b = f.read(1) if b is None or b == b"\0": return buf.decode("utf-8") elif b == "": raise EOFError("Buffer unexpectedly empty while trying to read null-terminated string") else: buf += b def readHeader(req, chr1, chr2, posilist): """ Reads the header Args: req (file): File to read from chr1 (str): Chromosome 1 chr2 (str): Chromosome 2 c1pos1 (int, optional): Starting range of chromosome1 output c1pos2 (int, optional): Stopping range of chromosome1 output c2pos1 (int, optional): Starting range of chromosome2 output c2pos2 (int, optional): Stopping range of chromosome2 output Returns: list: master index, chromosome1 index, chromosome2 index """ magic_string = struct.unpack('<3s', req.read(3))[0] req.read(1) if (magic_string != b"HIC"): print('This does not appear to be a HiC file magic string is incorrect') return -1 global version version = struct.unpack('<i', req.read(4))[0] if (version 
< 6): print("Version {0} no longer supported".format(str(version))) return -1 # print('HiC version:', version) master = struct.unpack('<q', req.read(8))[0] # print('Master: ', master) genome = b"" c = req.read(1) while (c != b'\0'): genome += c c = req.read(1) # print('Genome: ', genome.decode('utf-8')) # read and throw away attribute dictionary (stats+graphs) nattributes = struct.unpack('<i', req.read(4))[0] for x in range(nattributes): key = __readcstr(req) # print('Attributes:', key, ': ', end='') value = __readcstr(req) # print(value) nChrs = struct.unpack('<i', req.read(4))[0] found1 = False found2 = False for i in range(0, nChrs): name = __readcstr(req) if not name.startswith('chr'): name = 'chr' + name length = struct.unpack('<i', req.read(4))[0] if name == chr1: # print(name, ': ', length) found1 = True chr1ind = i if (posilist[0] == -100): posilist[0] = 0 posilist[1] = length if name == chr2: # print(name, ': ', length) found2 = True chr2ind = i if (posilist[2] == -100): posilist[2] = 0 posilist[3] = length if (not found1) or (not found2): print("One of the chromosomes wasn't found in the file. Check that the chromosome name matches the genome.\n") return -1 return [master, chr1ind, chr2ind, posilist[0], posilist[1], posilist[2], posilist[3]] def readFooter(req, c1, c2, norm, unit, resolution): """Reads the footer, which contains all the expected and normalization vectors. 
Presumes file pointer is in correct position Args: req (file): File to read from; presumes file pointer is in correct position chr1 (str): Chromosome 1 chr2 (str): Chromosome 2 norm (str): Normalization type, one of NONE, VC, KR, VC_SQRT unit (str): One of BP or FRAG resolution (int): Bin size Returns: list: File position of matrix, position+size chr1 normalization vector, position+size chr2 normalization vector """ c1NormEntry = dict() c2NormEntry = dict() nBytes = struct.unpack('<i', req.read(4))[0] # print('Footer nBytes: ', nBytes) key = str(c1) + "_" + str(c2) nEntries = struct.unpack('<i', req.read(4))[0] # print('Footer nEntries: ', nEntries) found = False for i in range(nEntries): stri = __readcstr(req) # print('Footer: ', stri) fpos = struct.unpack('<q', req.read(8))[0] # print('Footer fpos: ', fpos) sizeinbytes = struct.unpack('<i', req.read(4))[0] # print('Footer size in bytes: ', sizeinbytes) if stri == key: myFilePos = fpos found = True # print('Footer: ', stri, key, fpos) if not found: print("File doesn't have the given chr_chr map\n") if norm == "NONE": return [myFilePos, 0, 0] nExpectedValues = struct.unpack('<i', req.read(4))[0] for i in range(nExpectedValues): str_ = __readcstr(req) binSize = struct.unpack('<i', req.read(4))[0] nValues = struct.unpack('<i', req.read(4))[0] for j in range(nValues): v = struct.unpack('<d',req.read(8))[0] nNormalizationFactors = struct.unpack('<i',req.read(4))[0] for j in range(nNormalizationFactors): chrIdx = struct.unpack('<i',req.read(4))[0] v = struct.unpack('<d',req.read(8))[0] nExpectedValues = struct.unpack('<i',req.read(4))[0] for i in range(nExpectedValues): str_ = __readcstr(req) str_ = __readcstr(req) binSize = struct.unpack('<i',req.read(4))[0] nValues = struct.unpack('<i',req.read(4))[0] for j in range(nValues): v = struct.unpack('<d',req.read(8))[0] nNormalizationFactors = struct.unpack('<i',req.read(4))[0] for j in range(nNormalizationFactors): chrIdx = struct.unpack('<i',req.read(4))[0] v = 
struct.unpack('<d',req.read(8))[0] nEntries = struct.unpack('<i',req.read(4))[0] found1 = False found2 = False for i in range(nEntries): normtype = __readcstr(req) chrIdx = struct.unpack('<i',req.read(4))[0] unit1 = __readcstr(req) resolution1 = struct.unpack('<i',req.read(4))[0] filePosition = struct.unpack('<q',req.read(8))[0] sizeInBytes = struct.unpack('<i',req.read(4))[0] if (chrIdx==c1 and normtype==norm and unit1==unit and resolution1==resolution): c1NormEntry['position']=filePosition c1NormEntry['size']=sizeInBytes found1=True if (chrIdx==c2 and normtype==norm and unit1==unit and resolution1==resolution): c2NormEntry['position']=filePosition c2NormEntry['size']=sizeInBytes found2=True if ((not found1) or (not found2)): print("File did not contain {0} normalization vectors for one or both chromosomes at {1} {2}\n".format(norm, resolution, unit)) return -1 return [myFilePos, c1NormEntry, c2NormEntry] def readMatrixZoomData(req, myunit, mybinsize): """ Reads the Matrix Zoom Data, which gives pointer list for blocks for the data. 
Presumes file pointer is in correct position Args: req (file): File to read from; presumes file pointer is in correct position myunit (str): Unit (BP or FRAG) we're searching for mybinsize (int): Resolution we're searching for Returns: list containing boolean indicating if we found appropriate matrix, and if so, the counts for the bins and columns """ unit = __readcstr(req) # print(unit) temp = struct.unpack('<i', req.read(4))[0] # print(temp) temp2 = struct.unpack('<f', req.read(4))[0] # print(temp2) temp2 = struct.unpack('<f', req.read(4))[0] # print(temp2) temp2 = struct.unpack('<f', req.read(4))[0] # print(temp2) temp2 = struct.unpack('<f', req.read(4))[0] # print(temp2) binSize = struct.unpack('<i', req.read(4))[0] #print('MatrixZoom: ', binSize) blockBinCount = struct.unpack('<i', req.read(4))[0] #print('MatrixZoom: ', blockBinCount) blockColumnCount = struct.unpack('<i', req.read(4))[0] #print('MatrixZoom: ', blockColumnCount) storeBlockData = False # print(unit, binSize) #for the initial myBlockBinCount = -1 myBlockColumnCount = -1 if myunit == unit and mybinsize == binSize: myBlockBinCount = blockBinCount myBlockColumnCount = blockColumnCount storeBlockData = True nBlocks = struct.unpack('<i', req.read(4))[0] # print(nBlocks) for b in range(nBlocks): blockNumber = struct.unpack('<i', req.read(4))[0] filePosition = struct.unpack('<q', req.read(8))[0] blockSizeInBytes = struct.unpack('<i', req.read(4))[0] entry = dict() entry['size'] = blockSizeInBytes entry['position'] = filePosition # print(b, blockNumber, entry) if storeBlockData: blockMap[blockNumber] = entry return [storeBlockData, myBlockBinCount, myBlockColumnCount, binSize] # , binSize ? def readMatrix(req, unit, binsize): """ Reads the matrix - that is, finds the appropriate pointers to block data and stores them. Needs to read through headers of zoom data to find appropriate matrix. Presumes file pointer is in correct position. 
Args: req (file): File to read from; presumes file pointer is in correct position unit (str): Unit to search for (BP or FRAG) binsize (int): Resolution to search for Returns: list containing block bin count and block column count of matrix """ c1 = struct.unpack('<i', req.read(4))[0] #print('read matrix c1:', c1) c2 = struct.unpack('<i', req.read(4))[0] #print('read matrix c2:', c2) nRes = struct.unpack('<i', req.read(4))[0] #print('nRes: ', nRes) i = 0 found = False blockBinCount = -1 blockColumnCount = -1 res = [] while i < nRes and (not found): list1 = readMatrixZoomData(req, unit, binsize) found = list1[0] if not found: res.append(list1[3]) if list1[1] != -1 and list1[2] != -1: blockBinCount = list1[1] blockColumnCount = list1[2] i = i+1 if not found: raise ValueError('Error finding block data. Resolution should be in: {}'.format(res)) return [blockBinCount, blockColumnCount] def getBlockNumbersForRegionFromBinPosition(regionIndices, blockBinCount, blockColumnCount, intra): """ Gets the block numbers we will need for a specific region; used when the range to extract is sent in as a parameter Args: regionIndices (array): Array of ints giving range blockBinCount (int): The block bin count of the matrix blockColumnCount (int): The block column count of the matrix intra: Flag indicating if this is an intrachromosomal matrix Returns: blockSet (set): A set of blocks to print """ col1 = int(regionIndices[0] / blockBinCount) col2 = int((regionIndices[1] + 1) / blockBinCount) row1 = int(regionIndices[2] / blockBinCount) row2 = int((regionIndices[3] + 1) / blockBinCount) blocksSet = set() # print(str(col1)+"\t"+str(col2)+"\t"+str(row1)+"\t"+str(row2)) for r in range(row1, row2+1): for c in range(col1, col2+1): blockNumber = r * blockColumnCount +
""" Linear solvers that are used to solve for the gradient of an OpenMDAO System. (Not to be confused with the OpenMDAO Solver classes.) """ # pylint: disable=E0611, F0401 import numpy as np from scipy.sparse.linalg import gmres, LinearOperator from openmdao.main.mpiwrap import MPI from openmdao.util.graph import fix_single_tuple from openmdao.util.log import logger if MPI: from petsc4py import PETSc else: class PETSc(object): # Dummy class so things parse. pass class LinearSolver(object): """ A base class for linear solvers """ def __init__(self, system): """ Set up any LinearSolver object """ self._system = system self.options = system.options def _norm(self): """ Computes the norm of the linear residual """ system = self._system system.rhs_vec.array[:] = 0.0 system.applyJ(system.vector_vars.keys()) system.rhs_vec.array[:] *= -1.0 system.rhs_vec.array[:] += system.rhs_buf[:] if MPI: system.rhs_vec.petsc.assemble() return system.rhs_vec.petsc.norm() else: return np.linalg.norm(system.rhs_vec.array) class ScipyGMRES(LinearSolver): """ Scipy's GMRES Solver. This is a serial solver, so it should never be used in an MPI setting. """ def __init__(self, system): """ Set up ScipyGMRES object """ super(ScipyGMRES, self).__init__(system) n_edge = system.vec['f'].array.size system.rhs_buf = np.zeros((n_edge, )) system.sol_buf = np.zeros((n_edge, )) self.A = LinearOperator((n_edge, n_edge), matvec=self.mult, dtype=float) def calc_gradient(self, inputs, outputs, return_format='array'): """ Run GMRES solver to return a Jacobian of outputs with respect to inputs. 
""" system = self._system RHS = system.rhs_buf A = self.A # Size the problem num_input = system.get_size(inputs) num_output = system.get_size(outputs) if return_format == 'dict': J = {} for okey in outputs: J[okey] = {} for ikey in inputs: if isinstance(ikey, tuple): ikey = ikey[0] J[okey][ikey] = None else: J = np.zeros((num_output, num_input)) if system.mode == 'adjoint': outputs, inputs = inputs, outputs # If Forward mode, solve linear system for each parameter # If Adjoint mode, solve linear system for each requested output j = 0 for param in inputs: if isinstance(param, tuple): param = param[0] in_indices = system.vec['u'].indices(system.scope, param) jbase = j for irhs in in_indices: RHS[irhs] = 1.0 # Call GMRES to solve the linear system dx = self.solve(RHS) RHS[irhs] = 0.0 i = 0 for item in outputs: if isinstance(item, tuple): item = item[0] out_indices = system.vec['u'].indices(system.scope, item) nk = len(out_indices) if return_format == 'dict': if system.mode == 'forward': if J[item][param] is None: J[item][param] = np.zeros((nk, len(in_indices))) J[item][param][:, j-jbase] = dx[out_indices] else: if J[param][item] is None: J[param][item] = np.zeros((len(in_indices), nk)) J[param][item][j-jbase, :] = dx[out_indices] else: if system.mode == 'forward': J[i:i+nk, j] = dx[out_indices] else: J[j, i:i+nk] = dx[out_indices] i += nk j += 1 #print inputs, '\n', outputs, '\n', J return J def solve(self, arg): """ Solve the coupled equations for a new state vector that nulls the residual. 
Used by the Newton solvers.""" system = self._system options = self.options A = self.A #print system.name, 'Linear solution start vec', system.rhs_vec.array # Call GMRES to solve the linear system dx, info = gmres(A, arg, tol=options.atol, maxiter=options.maxiter) if info > 0: msg = "ERROR in calc_gradient in '%s': gmres failed to converge " \ "after %d iterations" logger.error(msg, system.name, info) elif info < 0: msg = "ERROR in calc_gradient in '%s': gmres failed" logger.error(msg, system.name) #print system.name, 'Linear solution vec', -dx return dx def mult(self, arg): """ GMRES Callback: applies Jacobian matrix. Mode is determined by the system.""" system = self._system system.sol_vec.array[:] = arg[:] # Start with a clean slate system.rhs_vec.array[:] = 0.0 system.clear_dp() if system._parent_system: vnames = system._parent_system._relevant_vars else: vnames = system.flat_vars.keys() system.applyJ(vnames) #print system.name, 'mult: arg, result', arg, system.rhs_vec.array[:] #print system.rhs_vec.keys() return system.rhs_vec.array[:] class PETSc_KSP(LinearSolver): """ PETSc's KSP solver with preconditioning. 
MPI is supported.""" def __init__(self, system): """ Set up KSP object """ super(PETSc_KSP, self).__init__(system) lsize = np.sum(system.local_var_sizes[system.mpi.rank, :]) size = np.sum(system.local_var_sizes) jac_mat = PETSc.Mat().createPython([(lsize, size), (lsize, size)], comm=system.mpi.comm) jac_mat.setPythonContext(self) jac_mat.setUp() self.ksp = PETSc.KSP().create(comm=system.mpi.comm) self.ksp.setOperators(jac_mat) self.ksp.setType('fgmres') self.ksp.setGMRESRestart(1000) self.ksp.setPCSide(PETSc.PC.Side.RIGHT) pc_mat = self.ksp.getPC() pc_mat.setType('python') pc_mat.setPythonContext(self) # # Set these in the system # #mpiprint("KSP: creating sol buf, size %d" % lsize) system.sol_buf = PETSc.Vec().createWithArray(np.zeros(lsize), comm=system.mpi.comm) # #mpiprint("KSP: creating rhs buf, size %d" % lsize) system.rhs_buf = PETSc.Vec().createWithArray(np.zeros(lsize), comm=system.mpi.comm) def calc_gradient(self, inputs, outputs, return_format='dict'): """Returns a nested dict of sensitivities if return_format == 'dict'. """ if return_format == 'dict': return self._J_dict_solve(inputs, outputs) else: raise RuntimeError("unsupported solve return_format '%s'" % return_format) def _J_dict_solve(self, inputs, outputs): """Returns a dict of sensitivities for given inputs and outputs. 
""" system = self._system options = self.options name2collapsed = system.scope.name2collapsed inputs = [fix_single_tuple(x) for x in inputs] outputs = [fix_single_tuple(x) for x in outputs] J = {} for okey in outputs: J[okey] = {} for ikey in inputs: J[okey][ikey] = None if system.mode == 'adjoint': outputs, inputs = inputs, outputs self.ksp.setTolerances(max_it=options.maxiter, atol=options.atol, rtol=options.rtol) j = 0 for param in inputs: param_tup = name2collapsed[param] param_size = system.get_size(param) jbase = j for irhs in xrange(param_size): solvec = system._compute_derivatives(param_tup, irhs) for out in outputs: out_size = system.get_size(out) if system.mode == 'forward': if out in solvec: if J[out][param] is None: J[out][param] = np.zeros((out_size, param_size)) J[out][param][:, j-jbase] = solvec[out] else: del J[out][param] else: if out in solvec: if J[param][out] is None: J[param][out] = np.zeros((out_size, param_size)) J[param][out][j-jbase, :] = solvec[out] else: del J[param][out] j += 1 return J def newton(self): """ Solve the coupled equations for a new state vector that nulls the residual. Used by the Newton solvers.""" system = self._system options = self.options self.ksp.setTolerances(max_it=options.maxiter, atol=options.atol, rtol=options.rtol) system.rhs_vec.array[:] = system.vec['f'].array[:] #print 'newton start vec', system.vec['f'].array[:] system.sol_buf.array[:] = system.sol_vec.array[:] system.rhs_buf.array[:] = system.rhs_vec.array[:] system.ln_solver.ksp.solve(system.rhs_buf, system.sol_buf) system.vec['df'].array[:] = -system.sol_buf.array[:] #print 'newton solution vec', system.vec['df'].array[:] def mult(self, mat, sol_vec, rhs_vec): """ KSP Callback: applies Jacobian matrix. 
Mode is determined by the system.""" system = self._system system.sol_vec.array[:] = sol_vec.array[:] # Start with a clean slate system.rhs_vec.array[:] = 0.0 system.clear_dp() system.applyJ(system.vector_vars.keys()) rhs_vec.array[:] = system.rhs_vec.array[:] # mpiprint('names = %s' % system.sol_vec.keys()) #mpiprint('arg = %s, result=%s' % (sol_vec.array, rhs_vec.array)) #mpiprint('df, du, dp', system.vec['df'].array, system.vec['du'].array, system.vec['dp'].array) def apply(self, mat, sol_vec, rhs_vec): """ Applies preconditioner """ #system = self._system # TODO - Preconditioning is not supported yet, so mimic an Identity # matrix. rhs_vec.array[:] = sol_vec.array[:] #system.rhs_vec.array[:] = sol_vec.array[:] #system.solve_precon() #rhs_vec.array[:] = system.sol_vec.array[:] class LinearGS(LinearSolver): """ Linear block Gauss Seidel. MPI is not supported yet. Serial block solve of D x = b - (L+U) x """ def __init__(self, system): """ Set up LinearGS object """ super(LinearGS, self).__init__(system) lsize = np.sum(system.local_var_sizes[system.mpi.rank, :]) system.sol_buf = np.zeros(lsize) system.rhs_buf = np.zeros(lsize) def calc_gradient(self, inputs, outputs, return_format='array'): """ Run GMRES solver to return a Jacobian of outputs with respect to inputs. """ system = self._system # Size the problem # TODO - Support for array slice inputs/outputs try: num_input = system.get_size(inputs) num_output = system.get_size(outputs) except KeyError as exc: if '[' in str(exc): msg = 'Array slice inputs and outputs currently not supported.' 
raise RuntimeError(msg) else: raise n_edge = system.vec['f'].array.size if return_format == 'dict': J = {} for okey in outputs: J[okey] = {} for ikey in inputs: if isinstance(ikey, tuple): ikey = ikey[0] J[okey][ikey] = None else: J = np.zeros((num_output, num_input)) if system.mode == 'adjoint': outputs, inputs = inputs, outputs # If Forward mode, solve linear system for each parameter # If Reverse mode, solve linear system for each requested output j = 0 for param in inputs: if isinstance(param, tuple): param = param[0] in_indices = system.vec['u'].indices(system.scope, param) jbase = j for irhs in in_indices: system.clear_dp() system.sol_vec.array[:] = 0.0 system.rhs_vec.array[:] = 0.0 system.rhs_vec.array[irhs] = 1.0 # Perform LinearGS solve dx = self.solve(system.rhs_vec.array) #system.rhs_vec.array[irhs] = 0.0 i = 0 for item in outputs: if isinstance(item, tuple): item = item[0] out_indices = system.vec['u'].indices(system.scope, item) nk = len(out_indices) if return_format == 'dict': if system.mode == 'forward': if J[item][param] is None: J[item][param] = np.zeros((nk, len(in_indices))) J[item][param][:, j-jbase] = dx[out_indices] else: if J[param][item] is None: J[param][item] = np.zeros((len(in_indices), nk)) J[param][item][j-jbase, :] = dx[out_indices] else: if system.mode == 'forward': J[i:i+nk, j] = dx[out_indices] else: J[j, i:i+nk] = dx[out_indices] i += nk j += 1 #print inputs, '\n', outputs, '\n', J return J def solve(self, arg): """ Executes an iterative solver """ system = self._system system.rhs_buf[:] = arg[:] system.sol_buf[:] = system.sol_vec.array[:] options = self.options system = self._system norm0, norm = 1.0, 1.0 counter = 0 while counter < options.maxiter and norm > options.atol and
import time
from typing import List, Optional, Tuple, Union

from lightly.openapi_generated.swagger_client.models.datasource_config import DatasourceConfig
from lightly.openapi_generated.swagger_client.models.datasource_purpose import DatasourcePurpose
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_request import DatasourceProcessedUntilTimestampRequest
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_response import DatasourceProcessedUntilTimestampResponse
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data import DatasourceRawSamplesData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data import DatasourceRawSamplesPredictionsData


class _DatasourcesMixin:
    # Mixin for an API-workflow client; assumes the host class provides
    # `self.dataset_id` and `self._datasources_api` — TODO confirm against
    # the composing class.

    def _download_raw_files(
        self,
        download_function: Union[
            "DatasourcesApi.get_list_of_raw_samples_from_datasource_by_dataset_id",
            "DatasourcesApi.get_list_of_raw_samples_predictions_from_datasource_by_dataset_id",
            "DatasourcesApi.get_list_of_raw_samples_metadata_from_datasource_by_dataset_id"
        ],
        from_: int = 0,
        to: int = None,
        relevant_filenames_file_name: str = None,
        **kwargs
    ):
        # Shared pagination driver for the three raw-sample listing endpoints.
        # Returns a list of (file_name, read_url) tuples.
        if to is None:
            # Default the upper bound to "now" (Unix seconds).
            to = int(time.time())
        relevant_filenames_kwargs = {
            "relevant_filenames_file_name": relevant_filenames_file_name
        } if relevant_filenames_file_name else dict()

        # First page: query by [from_, to]; later pages use the cursor only.
        response: DatasourceRawSamplesData = download_function(
            dataset_id=self.dataset_id,
            _from=from_,
            to=to,
            **relevant_filenames_kwargs,
            **kwargs
        )
        cursor = response.cursor
        samples = response.data
        # Follow the server-side cursor until the API reports no more data.
        while response.has_more:
            response: DatasourceRawSamplesData = download_function(
                dataset_id=self.dataset_id,
                cursor=cursor,
                **relevant_filenames_kwargs,
                **kwargs
            )
            cursor = response.cursor
            samples.extend(response.data)
        samples = [(s.file_name, s.read_url) for s in samples]
        return samples

    def download_raw_samples(
        self,
        from_: int = 0,
        to: int = None,
        relevant_filenames_file_name: str = None,
    ) -> List[Tuple[str, str]]:
        """Downloads all filenames and read urls from the datasource between `from_` and `to`.

        Samples which have timestamp == `from_` or timestamp == `to` will also be included.

        Args:
            from_:
                Unix timestamp from which on samples are downloaded.
            to:
                Unix timestamp up to and including which samples are downloaded.
            relevant_filenames_file_name:
                The path to the relevant filenames text file in the cloud bucket.
                The path is relative to the datasource root.

        Returns:
           A list of (filename, url) tuples, where each tuple represents a sample

        """
        samples = self._download_raw_files(
            self._datasources_api.get_list_of_raw_samples_from_datasource_by_dataset_id,
            from_,
            to,
            relevant_filenames_file_name
        )
        return samples

    def download_raw_predictions(
        self,
        task_name: str,
        from_: int = 0,
        to: int = None,
        relevant_filenames_file_name: str = None,
    ) -> List[Tuple[str, str]]:
        """Downloads all prediction filenames and read urls from the datasource between `from_` and `to`.

        Samples which have timestamp == `from_` or timestamp == `to` will also be included.

        Args:
            task_name:
                Name of the prediction task.
            from_:
                Unix timestamp from which on samples are downloaded.
            to:
                Unix timestamp up to and including which samples are downloaded.
            relevant_filenames_file_name:
                The path to the relevant filenames text file in the cloud bucket.
                The path is relative to the datasource root.

        Returns:
           A list of (filename, url) tuples, where each tuple represents a sample

        """
        samples = self._download_raw_files(
            self._datasources_api.get_list_of_raw_samples_predictions_from_datasource_by_dataset_id,
            from_,
            to,
            relevant_filenames_file_name,
            # task_name is forwarded as an extra query parameter via **kwargs.
            task_name=task_name
        )
        return samples

    def download_raw_metadata(
        self,
        from_: int = 0,
        to: int = None,
        relevant_filenames_file_name: str = None
    ) -> List[Tuple[str, str]]:
        """Downloads all metadata filenames and read urls from the datasource between `from_` and `to`.

        Samples which have timestamp == `from_` or timestamp == `to` will also be included.

        Args:
            from_:
                Unix timestamp from which on samples are downloaded.
            to:
                Unix timestamp up to and including which samples are downloaded.
            relevant_filenames_file_name:
                The path to the relevant filenames text file in the cloud bucket.
                The path is relative to the datasource root.

        Returns:
           A list of (filename, url) tuples, where each tuple represents a sample

        """
        samples = self._download_raw_files(
            self._datasources_api.get_list_of_raw_samples_metadata_from_datasource_by_dataset_id,
            from_,
            to,
            relevant_filenames_file_name
        )
        return samples

    def download_new_raw_samples(self) -> List[Tuple[str, str]]:
        """Downloads filenames and read urls of unprocessed samples from the datasource.

        All samples after the timestamp of `ApiWorkflowClient.get_processed_until_timestamp()`
        are fetched. After downloading the samples the timestamp is updated to the current time.
        This function can be repeatedly called to retrieve new samples from the datasource.

        Returns:
            A list of (filename, url) tuples, where each tuple represents a sample

        """
        from_ = self.get_processed_until_timestamp()

        if from_ != 0:
            # We already processed samples at some point.
            # Add 1 because the samples with timestamp == from_
            # have already been processed
            from_ += 1

        to = int(time.time())
        data = self.download_raw_samples(from_=from_, to=to)
        # Persist the new high-water mark so the next call starts after `to`.
        self.update_processed_until_timestamp(timestamp=to)
        return data

    def get_processed_until_timestamp(self) -> int:
        """Returns the timestamp until which samples have been processed.

        Returns:
            Unix timestamp of last processed sample
        """
        response: DatasourceProcessedUntilTimestampResponse = (
            self._datasources_api.get_datasource_processed_until_timestamp_by_dataset_id(
                dataset_id=self.dataset_id
            )
        )
        timestamp = int(response.processed_until_timestamp)
        return timestamp

    def update_processed_until_timestamp(self, timestamp: int) -> None:
        """Sets the timestamp until which samples have been processed.

        Args:
            timestamp:
                Unix timestamp of last processed sample
        """
        body = DatasourceProcessedUntilTimestampRequest(
            processed_until_timestamp=timestamp
        )
        self._datasources_api.update_datasource_processed_until_timestamp_by_dataset_id(
            dataset_id=self.dataset_id,
            body=body
        )

    def get_datasource(self) -> DatasourceConfig:
        """Calls the api to return the datasource of the current dataset.

        Returns:
            Datasource data of the datasource of the current dataset.

        Raises:
            ApiException if no datasource was configured.

        """
        return self._datasources_api.get_datasource_by_dataset_id(
            self.dataset_id
        )

    def set_azure_config(
        self,
        container_name: str,
        account_name: str,
        sas_token: str,
        thumbnail_suffix: Optional[str] = None,
        purpose: str = DatasourcePurpose.INPUT_OUTPUT,
    ) -> None:
        """Sets the Azure configuration for the datasource of the current dataset.

        Find a detailed explanation on how to setup Lightly with
        Azure Blob Storage in our docs:
        https://docs.lightly.ai/getting_started/dataset_creation/dataset_creation_azure_storage.html#

        Args:
            container_name:
                Container name of the dataset, for example: "my-container/path/to/my/data".
            account_name:
                Azure account name.
            sas_token:
                Secure Access Signature token.
            thumbnail_suffix:
                Where to save thumbnails of the images in the dataset, for
                example ".lightly/thumbnails/[filename]_thumb.[extension]".
                Set to None to disable thumbnails and use the full images from the
                datasource instead.
            purpose:
                Datasource purpose, determines if datasource is read only (INPUT)
                or if new data can be uploaded to the datasource through Lightly
                (INPUT_OUTPUT).

        """
        # TODO: Use DatasourceConfigAzure once we switch/update the api generator.
        self._datasources_api.update_datasource_by_dataset_id(
            body={
                'type': 'AZURE',
                'fullPath': container_name,
                'thumbSuffix': thumbnail_suffix,
                'accountName': account_name,
                'accountKey': sas_token,
                'purpose': purpose,
            },
            dataset_id=self.dataset_id,
        )

    def set_gcs_config(
        self,
        resource_path: str,
        project_id: str,
        credentials: str,
        thumbnail_suffix: Optional[str] = None,
        purpose: str = DatasourcePurpose.INPUT_OUTPUT,
    ) -> None:
        """Sets the Google Cloud Storage configuration for the datasource of the
        current dataset.

        Find a detailed explanation on how to setup Lightly with
        Google Cloud Storage in our docs:
        https://docs.lightly.ai/getting_started/dataset_creation/dataset_creation_gcloud_bucket.html

        Args:
            resource_path:
                GCS url of your dataset, for example: "gs://my_bucket/path/to/my/data"
            project_id:
                GCS project id.
            credentials:
                Content of the credentials JSON file stringified which you
                download from Google Cloud Platform.
            thumbnail_suffix:
                Where to save thumbnails of the images in the dataset, for
                example ".lightly/thumbnails/[filename]_thumb.[extension]".
                Set to None to disable thumbnails and use the full images from the
                datasource instead.
            purpose:
                Datasource purpose, determines if datasource is read only (INPUT)
                or if new data can be uploaded to the datasource through Lightly
                (INPUT_OUTPUT).

        """
        # TODO: Use DatasourceConfigGCS once we switch/update the api generator.
        self._datasources_api.update_datasource_by_dataset_id(
            body={
                'type': 'GCS',
                'fullPath': resource_path,
                'thumbSuffix': thumbnail_suffix,
                'gcsProjectId': project_id,
                'gcsCredentials': credentials,
                'purpose': purpose,
            },
            dataset_id=self.dataset_id,
        )

    def set_local_config(
        self,
        resource_path: str,
        thumbnail_suffix: Optional[str] = None,
    ) -> None:
        """Sets the local configuration for the datasource of the current dataset.

        Find a detailed explanation on how to setup Lightly with a local file
        server in our docs:
        https://docs.lightly.ai/getting_started/dataset_creation/dataset_creation_local_server.html

        Args:
            resource_path:
                Url to your local file server, for example:
                "http://localhost:1234/path/to/my/data".
            thumbnail_suffix:
                Where to save thumbnails of the images in the dataset, for
                example ".lightly/thumbnails/[filename]_thumb.[extension]".
                Set to None to disable thumbnails and use the full images from the
                datasource instead.

        """
        # TODO: Use DatasourceConfigLocal once we switch/update the api generator.
        # NOTE(review): unlike the other setters, purpose is hard-coded to
        # INPUT_OUTPUT here (no parameter exposed).
        self._datasources_api.update_datasource_by_dataset_id(
            body={
                'type': 'LOCAL',
                'fullPath': resource_path,
                'thumbSuffix': thumbnail_suffix,
                'purpose': DatasourcePurpose.INPUT_OUTPUT,
            },
            dataset_id=self.dataset_id,
        )

    def set_s3_config(
        self,
        resource_path: str,
        region: str,
        access_key: str,
        secret_access_key: str,
        thumbnail_suffix: Optional[str] = None,
        purpose: str = DatasourcePurpose.INPUT_OUTPUT,
    ) -> None:
        """Sets the S3 configuration for the datasource of the current dataset.

        Args:
            resource_path:
                S3 url of your dataset, for example "s3://my_bucket/path/to/my/data".
            region:
                S3 region where the dataset bucket is located, for example "eu-central-1".
            access_key:
                S3 access key.
            secret_access_key:
                Secret for the S3 access key.
            thumbnail_suffix:
                Where to save thumbnails of the images in the dataset, for
                example ".lightly/thumbnails/[filename]_thumb.[extension]".
                Set to None to disable thumbnails and use the full images from the
                datasource instead.
            purpose:
                Datasource purpose, determines if datasource is read only (INPUT)
                or if new data can be uploaded to the datasource through Lightly
                (INPUT_OUTPUT).

        """
        # TODO: Use DatasourceConfigS3 once we switch/update the api generator.
        self._datasources_api.update_datasource_by_dataset_id(
            body={
                'type': 'S3',
                'fullPath': resource_path,
                'thumbSuffix': thumbnail_suffix,
                's3Region': region,
                's3AccessKeyId': access_key,
                's3SecretAccessKey': secret_access_key,
                'purpose': purpose,
            },
            dataset_id=self.dataset_id,
        )

    # NOTE(review): "(unknown)" below looks like a placeholder lost in a doc
    # join — presumably the filename under .lightly/predictions/; verify
    # against upstream.
    def get_prediction_read_url(
        self,
        filename: str,
    ):
        """Returns a read-url for .lightly/predictions/(unknown).

        Args:
            filename:
                Filename for which to get the read-url.

        Returns the read-url. If the file does not exist, a read-url is returned
# import all of our required libraries for necessary data processing and data requests
import numpy as np
import pandas as pd
from binance.client import Client
import joblib
import os


# define our function to retrieve klines data from binance API
def get_data():
    '''
    Execute an API call to Binance and return the most recent 91 daily
    BTCUSDT candles as a float-typed DataFrame indexed by date. The 91-day
    window is the minimum needed to compute the 90-day moving average below.
    '''
    client = Client()  # anonymous client — public klines need no API key
    candles = client.get_klines(symbol='BTCUSDT',
                                interval=Client.KLINE_INTERVAL_1DAY,
                                limit=91)

    # column labels as documented by the Binance API
    kline_columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume',
                     'Close time', 'Quote asset volume', 'Number of trades',
                     'Taker buy base volume', 'Taker buy quote volume',
                     'Ignore']
    frame = pd.DataFrame(candles, columns=kline_columns)

    # keep only the columns we use downstream
    frame = frame.drop(['Close time', 'Ignore'], axis=1)
    # millisecond epoch -> datetime index for readable, time-aware rolling ops
    frame['Date'] = frame['Date'].apply(lambda ms: pd.to_datetime(ms, unit='ms'))
    frame = frame.set_index('Date')
    # Binance returns strings; cast everything to float for the math below
    return frame.astype('float64')


# we will define a function to run prior to calcualting our averages
def feat_eng(X_df):
    '''
    Intakes "X" portion of data and outputs selected engineered features:
    daily range, volume ratio, taker-quote/base ratio and 7/30/90-day SMAs.
    Mutates and returns the same DataFrame.
    '''
    X_df['High/Low'] = X_df['High'] - X_df['Low']
    X_df['volX'] = X_df['Quote asset volume'] / X_df['Volume']
    X_df['quote-buy'] = X_df['Taker buy quote volume'] / X_df['Taker buy base volume']

    # 7, 30, and 90 day simple moving averages over the datetime index
    for window in (7, 30, 90):
        X_df[str(window) + 'sma'] = X_df['Close'].rolling(f'{window}D').mean()

    return X_df


# Now we want to take the most recent data point possible to make our prediction
# (fix) a stray bare token `from` stood here — the tail of the wrapped comment
# above that lost its '#' during formatting. It was a SyntaxError at import
# time, so it is folded into this comment.

def X_inputs(X_df):
    '''Return (most recent row, previous day's row) of the engineered frame.'''
    x_input = X_df[-1:]        # take the most recent value after calculations
    x_yesterday = X_df[-2:-1]  # values from previous day
    return x_input, x_yesterday


# now to create a function that ties all of these together and gives us our
# desired inputs for the model
def to_predict():
    '''Download data, engineer features, and return today's and yesterday's rows.'''
    data = get_data()
    X_df = feat_eng(data)
    X_input, X_yesterday = X_inputs(X_df)
    return X_input, X_yesterday


# now we must load our saved model using pickle (joblib reads the pickle file);
# this runs at import time, so final_model.pkl must exist beside the script
with open("final_model.pkl", "rb") as file:
    model = joblib.load(file)


def add_prediction(X_input, X_yesterday):
    '''Append 'Prediction' (class) and 'Confidence' (max class probability)
    columns to both the current and previous-day rows, using the loaded model.
    '''
    pred_X = model.predict_proba(X_input)[0]  # predictor array of confidence
    # predicted class based on higher confidence
    X_input['Prediction'] = 1 if pred_X[1] > pred_X[0] else 0
    # confidence score (probability) of the larger class
    X_input['Confidence'] = pred_X[1] if pred_X[1] > pred_X[0] else pred_X[0]

    pred_yesterday = model.predict_proba(X_yesterday)[0]
    X_yesterday['Prediction'] = 1 if pred_yesterday[1] > pred_yesterday[0] else 0
    X_yesterday['Confidence'] = pred_yesterday[1] if pred_yesterday[1] > pred_yesterday[0] else pred_yesterday[0]
    return X_input, X_yesterday


def eval_prediction(X_input, X_yesterday):
    '''
    This function will intake our modified X dataframe from the previous day
    as well as our current prediction and output a new column which gives the
    correct label, as well as if the model predicted correctly or not.
    (Both frames are single-row, so the .values comparisons below reduce to
    one-element boolean arrays, which Python treats as plain booleans.)
    '''
    # this gives us the correct label
    X_yesterday['True_Label'] = 1 if X_input['Close'].values > X_yesterday['Close'].values else 0
    # this gives a 1 for a correct prediction and a 0 for incorrect
    X_yesterday['Correct_Pred'] = 1 if X_yesterday['Prediction'].values == X_yesterday['True_Label'].values else 0
    return X_yesterday


# in this version we will be moving away from using sql to store our data and
# instead using csv files which are convenient and easy to export to google
# drive or other services.
def to_predictions_csv(X_yesterday):
    '''
    This function takes in our fully evaluated predictions and writes them to a CSV file.
    '''
    # Append without a header if the file already exists; otherwise create it.
    if os.path.isfile('./CSVs/model_predictions.csv'):
        X_yesterday.to_csv('./CSVs/model_predictions.csv', mode='a', header=0)
    else:
        X_yesterday.to_csv('./CSVs/model_predictions.csv')
    print('Data written to model_predictions.csv!')


# Now that we have imported all of the necesary functions, we can incorporate our process of evaluation
# we will now also want to draw on the full CSV file to calculate our ongoing model metrics.

# now we need to establish model accuracy measure
# this will be the sum of the correct_pred column divided by its length
# to do this we will calculate the metric each time the new data is imported and append it to a new column in the dataframe

def get_performance():
    '''
    This function will take in our model performance CSV and add a new column called model accuracy.
    This will be updated daily as each new prediction is implemented.
    We will take only the most recent value as an output so that we only add the most recent row to our next CSV.
    '''
    model_data = pd.read_csv('./CSVs/model_predictions.csv', parse_dates=['Date'])
    model_data.set_index('Date', inplace=True)
    model_data['Model_Accuracy'] = 0
    # running accuracy = fraction of correct predictions so far
    model_acc = sum(model_data['Correct_Pred']) / len(model_data['Correct_Pred'])
    # NOTE(review): chained indexing ['Model_Accuracy'][-1:] = ... triggers
    # SettingWithCopyWarning and may not write through — consider
    # model_data.loc[model_data.index[-1], 'Model_Accuracy'] = model_acc * 100
    model_data['Model_Accuracy'][-1:] = model_acc * 100
    model_data = model_data.filter(['Date', 'Close', 'Prediction', 'Confidence',
                                    'True_Label', 'Correct_Pred', 'Model_Accuracy'], axis=1)
    model_data = model_data.round(2)
    # only the newest row is appended to the performance CSV
    for_CSV_data = model_data[-1:]
    return for_CSV_data


# now we will write our model performance measures to another CSV
def to_performance_csv(for_CSV_data):
    '''
    This function takes in our fully evaluated predictions and writes them to a CSV file.
    '''
    # Same append-or-create pattern as to_predictions_csv.
    if os.path.isfile('./CSVs/model_performance.csv'):
        for_CSV_data.to_csv('./CSVs/model_performance.csv', mode='a', header=0)
    else:
        for_CSV_data.to_csv('./CSVs/model_performance.csv')
    print('Data written to model_performance.csv!')


# we will access our most recent row in our performance CSV column to get our desired trade info and calculate our current quantitative stats
def get_trade_info():
    # Last performance row, renamed/trimmed to the trade bookkeeping columns.
    trade_info = pd.read_csv('./CSVs/model_performance.csv', parse_dates=['Date'])
    trade_info.set_index('Date', inplace=True)
    trade_info = trade_info[-1:]
    trade_info = trade_info.rename({'Close': 'Entry', 'Correct_Pred': 'Win'}, axis=1)
    trade_info = trade_info.filter(['Date', 'Entry', 'Win'], axis=1)
    return trade_info


# we will slightly modify our data gathering function from our predictor script
def get_price():
    '''
    This function will execute API call to Binance to retrieve data.
    We will export the results of this data into the appropriately named dataframe for further feature engineering.
    '''
    client = Client()  # establishing our blank client
    candles = client.get_klines(symbol='BTCUSDT', interval=Client.KLINE_INTERVAL_1DAY, limit=1)  # we only need to request the most recent entry
    data = pd.DataFrame(candles, columns=['Date', 'Open', 'High', 'Low', 'Close', 'Volume',
                                          'Close time', 'Quote asset volume', 'Number of trades',
                                          'Taker buy base volume', 'Taker buy quote volume',
                                          'Ignore'])  # these column labels are as labelled on the Binance API documentation
    price = data[['Date', 'Close']]  # taking only the desired columns
    # NOTE(review): `price` is a slice of `data`; assigning into it raises
    # SettingWithCopyWarning — consider price = data[['Date', 'Close']].copy()
    price['Date'] = price['Date'].apply(lambda x: pd.to_datetime(x, unit='ms'))  # setting date to proper format
    price.set_index('Date', inplace=True)  # setting our index
    return price


# we will need a function to retrieve our current stake price, as we are stsarting out initially, we will not have a stake in any data
# we will creat a function that will check if our desired CSV files exists tracking our stake
# if it does not exist, the function will use our inital stake of 1000
def get_stake():
    # Resume from the last recorded Stake_Out, or start from the fixed
    # initial bankroll of 1000.
    if os.path.isfile('./CSVs/model_quantitative_stats.csv'):
        data = pd.read_csv('./CSVs/model_quantitative_stats.csv', parse_dates=['Date'])
        data.set_index('Date', inplace=True)
        data_needed = data[-1:]
        stake = data_needed['Stake_Out'].values[0]
    else:
        stake = 1000
    return stake


# now to create a function that will calcuate our net percentage difference in price change and assign a value against
# todays price which will be fetched from binance
def get_gains():
    '''
    This function will intake the resulting dataframes from the get_trade_info and get_price functions
    and check the result against our entry price. This will then calculate a net percentage change.
    Using the value in the "win" column, we can tell the function to apply a positive or negative change to our stake value.
    All of this data will then be stored to a new database for recall.
    '''
    trades = get_trade_info()
    price = get_price()
    price.Close = price.Close.astype('float')
    stake_in = get_stake()
    trades['Exit'] = price.Close.values
    # absolute move; sign is reapplied below from the Win flag
    net_change = abs(trades.Exit - trades.Entry)
    trades['Pct_Change'] = 0
    trades['Gains(%)'] = 0
    trades['Stake_In'] = stake_in
    trades['Stake_Out'] = 0
    trades['Net_Profits'] = 0
    trades['Profit_YTD'] = 0
    trades['ROI(%)'] = 0
    pct = net_change / trades.Entry
    trades['Pct_Change'] = pct
    # losing day -> negative gain of the same magnitude
    trades['Gains(%)'] = -(trades.Pct_Change) if trades.Win.values == 0 else trades.Pct_Change
    trades['Stake_Out'] = trades.Stake_In + (trades.Stake_In * trades['Gains(%)'])
    trades['Net_Profits'] = trades.Stake_Out - trades.Stake_In
    # profit measured against the fixed 1000 starting bankroll
    trades['Profit_YTD'] = trades.Stake_Out - 1000
    trades['ROI(%)'] = trades.Profit_YTD / 1000
        indices are stored with a 32-bit dtype.

        .. versionadded:: 0.20

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:

        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.

        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.

        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`

    ensure_2d : boolean (default=True)
        Whether to raise a value error if array is not 2D.

    allow_nd : boolean (default=False)
        Whether to allow array.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    array_converted : object
        The converted and validated array.
    """
    # store reference to original array to check if copy is needed when
    # function returns
    array_orig = array

    #import pdb;pdb.set_trace()
    return array #TMP todo: perform checks for af::array
    # NOTE(review): the early return above short-circuits this function — all
    # validation below is currently UNREACHABLE dead code, kept for when the
    # af::array checks are implemented.

    # store whether originally we wanted numeric dtype
    dtype_numeric = isinstance(dtype, str) and dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    # check if the object contains several dtypes (typically a pandas
    # DataFrame), and store them. If not, store None.
    dtypes_orig = None
    has_pd_integer_array = False
    if hasattr(array, "dtypes") and hasattr(array.dtypes, '__array__'):
        # throw warning if columns are sparse. If all columns are sparse, then
        # array.sparse exists and sparsity will be perserved (later).
        with suppress(ImportError):
            from pandas.api.types import is_sparse
            if (not hasattr(array, 'sparse') and
                    array.dtypes.apply(is_sparse).any()):
                warnings.warn(
                    "pandas.DataFrame with sparse columns found."
                    "It will be converted to a dense numpy array."
                )

        dtypes_orig = list(array.dtypes)
        # pandas boolean dtype __array__ interface coerces bools to objects
        for i, dtype_iter in enumerate(dtypes_orig):
            if dtype_iter.kind == 'b':
                dtypes_orig[i] = np.dtype(np.object)
            elif dtype_iter.name.startswith(("Int", "UInt")):
                # name looks like an Integer Extension Array, now check for
                # the dtype
                with suppress(ImportError):
                    from pandas import (Int8Dtype, Int16Dtype,
                                        Int32Dtype, Int64Dtype,
                                        UInt8Dtype, UInt16Dtype,
                                        UInt32Dtype, UInt64Dtype)
                    if isinstance(dtype_iter, (Int8Dtype, Int16Dtype,
                                               Int32Dtype, Int64Dtype,
                                               UInt8Dtype, UInt16Dtype,
                                               UInt32Dtype, UInt64Dtype)):
                        has_pd_integer_array = True

        if all(isinstance(dtype, np.dtype) for dtype in dtypes_orig):
            dtype_orig = np.result_type(*dtypes_orig)

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    if has_pd_integer_array:
        # If there are any pandas integer extension arrays,
        array = array.astype(dtype)

    if force_all_finite not in (True, False, 'allow-nan'):
        raise ValueError('force_all_finite should be a bool or "allow-nan"'
                         '. Got {!r} instead'.format(force_all_finite))

    if estimator is not None:
        if isinstance(estimator, str):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""

    # When all dataframe columns are sparse, convert to a sparse array
    if hasattr(array, 'sparse') and array.ndim > 1:
        # DataFrame.sparse only supports `to_coo`
        array = array.sparse.to_coo()

    if sp.issparse(array):
        _ensure_no_complex_data(array)
        array = _ensure_sparse_format(array, accept_sparse=accept_sparse,
                                      dtype=dtype, copy=copy,
                                      force_all_finite=force_all_finite,
                                      accept_large_sparse=accept_large_sparse)
    else:
        # If np.array(..) gives ComplexWarning, then we convert the warning
        # to an error. This is needed because specifying a non complex
        # dtype to the function converts complex to real dtype,
        # thereby passing the test made in the lines following the scope
        # of warnings context manager.
        with warnings.catch_warnings():
            try:
                warnings.simplefilter('error', ComplexWarning)
                if dtype is not None and np.dtype(dtype).kind in 'iu':
                    # Conversion float -> int should not contain NaN or
                    # inf (numpy#14412). We cannot use casting='safe' because
                    # then conversion float -> int would be disallowed.
                    array = np.asarray(array, order=order)
                    if array.dtype.kind == 'f':
                        _assert_all_finite(array, allow_nan=False,
                                           msg_dtype=dtype)
                    array = array.astype(dtype, casting="unsafe", copy=False)
                else:
                    array = np.asarray(array, order=order, dtype=dtype)
            except ComplexWarning:
                raise ValueError("Complex data not supported\n"
                                 "{}\n".format(array))

        # It is possible that the np.array(..) gave no warning. This happens
        # when no dtype conversion happened, for example dtype = None. The
        # result is that np.array(..) produces an array of complex dtype
        # and we need to catch and raise exception for such cases.
        _ensure_no_complex_data(array)

        if ensure_2d:
            # If input is scalar raise error
            if array.ndim == 0:
                raise ValueError(
                    "Expected 2D array, got scalar array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array))
            # If input is 1D raise error
            if array.ndim == 1:
                raise ValueError(
                    "Expected 2D array, got 1D array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array))

        # in the future np.flexible dtypes will be handled like object dtypes
        #if dtype_numeric and np.issubdtype(array.dtype, np.flexible):
            #warnings.warn(
                #"Beginning in version 0.22, arrays of bytes/strings will be "
                #"converted to decimal numbers if dtype='numeric'. "
                #"It is recommended that you convert the array to "
                #"a float dtype before using it in scikit-learn, "
                #"for example by using "
                #"your_array = your_array.astype(np.float64).",
                #FutureWarning, stacklevel=2)

        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. %s expected <= 2."
                             % (array.ndim, estimator_name))

        if force_all_finite:
            _assert_all_finite(array,
                               allow_nan=force_all_finite == 'allow-nan')

    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required%s."
                             % (n_samples, array.shape, ensure_min_samples,
                                context))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required%s."
                             % (n_features, array.shape, ensure_min_features,
                                context))

    if copy and np.may_share_memory(array, array_orig):
        array = np.array(array, dtype=dtype, order=order)

    return array


@_deprecate_positional_args
def check_X_y(X, y, accept_sparse=False, *, accept_large_sparse=True,
              dtype="numeric", order=None, copy=False, force_all_finite=True,
              ensure_2d=True, allow_nd=False, multi_output=False,
              ensure_min_samples=1, ensure_min_features=1, y_numeric=False,
              estimator=None):
    """Input validation for standard estimators.

    Checks X and y for consistent length, enforces X to be 2D and y 1D. By
    default, X is checked to be non-empty and containing only finite values.
    Standard input checks are also applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2D and sparse y. If
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import httpretty

from novaclient.openstack.common import jsonutils
from novaclient.tests import fakes
from novaclient.tests.fixture_data import base


class Base(base.Fixture):
    # Test fixture that registers canned HTTP responses (via httpretty) for
    # the nova "servers" endpoints used by the client test suite.

    base_url = 'servers'

    def setUp(self):
        """Register stub responses for server list/detail/get/delete,
        metadata, security-group, action, and os-interface endpoints."""
        super(Base, self).setUp()

        # GET /servers -> minimal two-server listing
        get_servers = {
            "servers": [
                {'id': 1234, 'name': 'sample-server'},
                {'id': 5678, 'name': 'sample-server2'}
            ]
        }

        httpretty.register_uri(httpretty.GET, self.url(),
                               body=jsonutils.dumps(get_servers),
                               content_type='application/json')

        # Canonical detailed server payloads reused by several stubs below.
        self.server_1234 = {
            "id": 1234,
            "name": "sample-server",
            "image": {
                "id": 2,
                "name": "sample image",
            },
            "flavor": {
                "id": 1,
                "name": "256 MB Server",
            },
            "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0",
            "status": "BUILD",
            "progress": 60,
            "addresses": {
                "public": [{
                    "version": 4,
                    "addr": "1.2.3.4",
                },
                {
                    "version": 4,
                    "addr": "5.6.7.8",
                }],
                "private": [{
                    "version": 4,
                    "addr": "10.11.12.13",
                }],
            },
            "metadata": {
                "Server Label": "Web Head 1",
                "Image Version": "2.1"
            },
            "OS-EXT-SRV-ATTR:host": "computenode1",
            "security_groups": [{
                'id': 1, 'name': 'securitygroup1',
                'description': 'FAKE_SECURITY_GROUP',
                'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7'
            }],
            "OS-EXT-MOD:some_thing": "mod_some_thing_value",
        }

        self.server_5678 = {
            "id": 5678,
            "name": "sample-server2",
            "image": {
                "id": 2,
                "name": "sample image",
            },
            "flavor": {
                "id": 1,
                "name": "256 MB Server",
            },
            "hostId": "9e107d9d372bb6826bd81d3542a419d6",
            "status": "ACTIVE",
            "addresses": {
                "public": [{
                    "version": 4,
                    "addr": "4.5.6.7",
                },
                {
                    "version": 4,
                    "addr": "172.16.31.10",
                }],
                "private": [{
                    "version": 4,
                    "addr": "10.13.12.13",
                }],
            },
            "metadata": {
                "Server Label": "DB 1"
            },
            "OS-EXT-SRV-ATTR:host": "computenode2",
            "security_groups": [{
                'id': 1, 'name': 'securitygroup1',
                'description': 'FAKE_SECURITY_GROUP',
                'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7'
            },
            {
                'id': 2, 'name': 'securitygroup2',
                'description': 'ANOTHER_FAKE_SECURITY_GROUP',
                'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7'
            }],
        }

        # server_9012 deliberately has image == "" (image-less boot case).
        self.server_9012 = {
            "id": 9012,
            "name": "sample-server3",
            "image": "",
            "flavor": {
                "id": 1,
                "name": "256 MB Server",
            },
            "hostId": "9e107d9d372bb6826bd81d3542a419d6",
            "status": "ACTIVE",
            "addresses": {
                "public": [{
                    "version": 4,
                    "addr": "4.5.6.7",
                },
                {
                    "version": 4,
                    "addr": "5.6.9.8",
                }],
                "private": [{
                    "version": 4,
                    "addr": "10.13.12.13",
                }],
            },
            "metadata": {
                "Server Label": "DB 1"
            }
        }

        servers = [self.server_1234, self.server_5678, self.server_9012]
        get_servers_detail = {"servers": servers}

        httpretty.register_uri(httpretty.GET, self.url('detail'),
                               body=jsonutils.dumps(get_servers_detail),
                               content_type='application/json')

        # server_1235 is a copy of 1234 in an error state with a fault record.
        self.server_1235 = self.server_1234.copy()
        self.server_1235['id'] = 1235
        self.server_1235['status'] = 'error'
        self.server_1235['fault'] = {'message': 'something went wrong!'}

        servers.append(self.server_1235)

        # GET /servers/<id> for every canned server.
        for s in servers:
            httpretty.register_uri(httpretty.GET, self.url(s['id']),
                                   body=jsonutils.dumps({'server': s}),
                                   content_type='application/json')

        for s in (1234, 5678):
            httpretty.register_uri(httpretty.DELETE, self.url(s), status=202)

        for k in ('test_key', 'key1', 'key2'):
            httpretty.register_uri(httpretty.DELETE,
                                   self.url(1234, 'metadata', k), status=204)

        metadata1 = jsonutils.dumps({'metadata': {'test_key': 'test_value'}})
        httpretty.register_uri(httpretty.POST, self.url(1234, 'metadata'),
                               body=metadata1,
                               status=200,
                               content_type='application/json')
        httpretty.register_uri(httpretty.PUT,
                               self.url(1234, 'metadata', 'test_key'),
                               body=metadata1,
                               status=200,
                               content_type='application/json')

        self.diagnostic = jsonutils.dumps({'data': 'Fake diagnostics'})

        metadata2 = jsonutils.dumps({'metadata': {'key1': 'val1'}})
        for u in ('uuid1', 'uuid2', 'uuid3', 'uuid4'):
            httpretty.register_uri(httpretty.POST, self.url(u, 'metadata'),
                                   body=metadata2, status=204)
            httpretty.register_uri(httpretty.DELETE,
                                   self.url(u, 'metadata', 'key1'),
                                   body=self.diagnostic,
                                   content_type='application/json')

        get_security_groups = {
            "security_groups": [{
                'id': 1,
                'name': 'securitygroup1',
                'description': 'FAKE_SECURITY_GROUP',
                'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7',
                'rules': []}]
        }

        httpretty.register_uri(httpretty.GET,
                               self.url('1234', 'os-security-groups'),
                               body=jsonutils.dumps(get_security_groups),
                               status=200)

        # POST /servers and POST /servers/1234/action are validated by
        # callback methods defined elsewhere on this fixture class.
        httpretty.register_uri(httpretty.POST, self.url(),
                               body=self.post_servers,
                               content_type='application/json')

        httpretty.register_uri(httpretty.POST, self.url('1234', 'action'),
                               body=self.post_servers_1234_action,
                               content_type='application/json')

        get_os_interface = {
            "interfaceAttachments": [
                {
                    "port_state": "ACTIVE",
                    "net_id": "net-id-1",
                    "port_id": "port-id-1",
                    "mac_address": "aa:bb:cc:dd:ee:ff",
                    "fixed_ips": [{"ip_address": "1.2.3.4"}],
                },
                {
                    "port_state": "ACTIVE",
                    "net_id": "net-id-1",
                    "port_id": "port-id-1",
                    "mac_address": "aa:bb:cc:dd:ee:ff",
                    "fixed_ips": [{"ip_address": "1.2.3.4"}],
                }
            ]
        }

        httpretty.register_uri(httpretty.GET,
                               self.url('1234', 'os-interface'),
                               body=jsonutils.dumps(get_os_interface),
                               content_type='application/json')

        interface_data = {'interfaceAttachment': {}}
        httpretty.register_uri(httpretty.POST,
                               self.url('1234', 'os-interface'),
                               body=jsonutils.dumps(interface_data),
                               content_type='application/json')

        def put_servers_1234(request, url, headers):
            # Validate the PUT body: exactly one top-level 'server' key with
            # optional name/adminPass, then echo the body back.
            body = jsonutils.loads(request.body.decode('utf-8'))
            assert list(body) == ['server']
            fakes.assert_has_keys(body['server'], optional=['name', 'adminPass'])
            return 204, headers, request.body

        httpretty.register_uri(httpretty.PUT, self.url(1234),
                               body=put_servers_1234,
                               content_type='application/json')

    def
post_os_volumes_boot(request, url, headers): body = jsonutils.loads(request.body.decode('utf-8')) assert (set(body.keys()) <= set(['server', 'os:scheduler_hints'])) fakes.assert_has_keys(body['server'], required=['name', 'flavorRef'], optional=['imageRef']) data = body['server'] # Require one, and only one, of the keys for bdm if 'block_device_mapping' not in data: if 'block_device_mapping_v2' not in data: msg = "missing required keys: 'block_device_mapping'" raise AssertionError(msg) elif 'block_device_mapping_v2' in data: msg = "found extra keys: 'block_device_mapping'" raise AssertionError(msg) return 202, headers, jsonutils.dumps({'server': self.server_9012}) # NOTE(jamielennox): hack to make os_volumes mock go to the right place base_url = self.base_url self.base_url = None httpretty.register_uri(httpretty.POST, self.url('os-volumes_boot'), body=post_os_volumes_boot, content_type='application/json') self.base_url = base_url # # Server password # httpretty.register_uri(httpretty.DELETE, self.url(1234, 'os-server-password'), status=202) class V1(Base): def setUp(self): super(V1, self).setUp() # # Server Addresses # add = self.server_1234['addresses'] httpretty.register_uri(httpretty.GET, self.url(1234, 'ips'), jsonutils.dumps({'addresses': add}), content_type='application/json') httpretty.register_uri(httpretty.GET, self.url(1234, 'ips', 'public'), jsonutils.dumps({'public': add['public']}), content_type='application/json') httpretty.register_uri(httpretty.GET, self.url(1234, 'ips', 'private'), jsonutils.dumps({'private': add['private']}), content_type='application/json') httpretty.register_uri(httpretty.DELETE, self.url(1234, 'ips', 'public', '1.2.3.4'), status=202) httpretty.register_uri(httpretty.GET, self.url('1234', 'diagnostics'), body=self.diagnostic, status=200) httpretty.register_uri(httpretty.DELETE, self.url('1234', 'os-interface', 'port-id')) # Testing with the following password and key # # Clear password: <PASSWORD> # # RSA Private Key: 
novaclient/tests/idfake.pem # # Encrypted password # <KEY> # <KEY> # <KEY> # <KEY> # <KEY> # Hi/fmZZNQQqj1Ijq0caOIw== get_server_password = {'password': '<KEY>' '<KEY>' '<KEY>' <KEY>3B9JZGFB2qtPLQTOvDMZLUhoPRIJeHiVSlo1N' 'tI2/++UsXVg3ow6ItqCJGgdNuGG5JB+bslDHWPxROpesEIHdczk46HCpHQN8f1sk' 'Hi/fmZZNQQqj1Ijq0caOIw=='} httpretty.register_uri(httpretty.GET, self.url(1234, 'os-server-password'), jsonutils.dumps(get_server_password)) def post_servers(self, request, url, headers): body = jsonutils.loads(request.body.decode('utf-8')) assert (set(body.keys()) <= set(['server', 'os:scheduler_hints'])) fakes.assert_has_keys(body['server'], required=['name', 'imageRef', 'flavorRef'], optional=['metadata', 'personality']) if 'personality' in body['server']: for pfile in body['server']['personality']: fakes.assert_has_keys(pfile, required=['path', 'contents']) if body['server']['name'] == 'some-bad-server': body = self.server_1235 else: body = self.server_1234 return 202, headers, jsonutils.dumps({'server': body}) def post_servers_1234_action(self, request, url, headers): _body = '' body = jsonutils.loads(request.body.decode('utf-8')) resp = 202 assert len(body.keys()) == 1 action = list(body)[0] if action == 'reboot': assert list(body[action]) == ['type'] assert body[action]['type'] in ['HARD', 'SOFT'] elif action == 'rebuild': body = body[action] adminPass = body.get('adminPass', '<PASSWORD>') assert 'imageRef' in body _body = self.server_1234.copy() _body['adminPass'] = adminPass elif action == 'resize': keys = body[action].keys() assert 'flavorRef' in keys elif action == 'confirmResize': assert body[action] is None # This one method returns a different response code return 204, headers, '' elif action == 'revertResize': assert body[action] is None elif action == 'migrate': assert body[action] is None elif action == 'os-stop': assert body[action] is None elif action == 'os-start': assert body[action] is None elif action == 'forceDelete': assert body[action] is None elif action 
== 'restore': assert body[action] is None elif action == 'pause': assert body[action] is None elif action == 'unpause': assert body[action] is None elif action == 'lock': assert body[action] is None elif action == 'unlock': assert body[action] is None elif action == 'rescue': assert body[action] is None _body = {'Password': '<PASSWORD>'} elif action == 'unrescue': assert body[action] is None elif action == 'resume': assert body[action] is None elif action == 'suspend': assert body[action] is None elif action == 'lock': assert body[action] is None elif action == 'unlock': assert body[action] is None elif action == 'shelve': assert body[action] is None elif action == 'shelveOffload': assert body[action] is None elif action == 'unshelve': assert body[action] is None elif action == 'addFixedIp': assert list(body[action]) == ['networkId'] elif action == 'removeFixedIp': assert list(body[action]) == ['address'] elif action == 'addFloatingIp': assert (list(body[action]) == ['address'] or sorted(list(body[action])) == ['address', 'fixed_address']) elif action == 'removeFloatingIp': assert list(body[action]) == ['address'] elif action == 'createImage': assert set(body[action].keys()) == set(['name', 'metadata']) headers['location'] = "http://blah/images/456" elif action == 'changePassword': assert list(body[action]) == ['adminPass'] elif action == 'os-getConsoleOutput': assert list(body[action]) == ['length'] return 202, headers, jsonutils.dumps({'output': 'foo'}) elif action == 'os-getVNCConsole': assert list(body[action]) == ['type'] elif action == 'os-getSPICEConsole': assert list(body[action]) == ['type'] elif action == 'os-getRDPConsole': assert list(body[action]) == ['type'] elif action == 'os-migrateLive': assert set(body[action].keys()) == set(['host', 'block_migration', 'disk_over_commit']) elif action == 'os-resetState': assert list(body[action]) == ['state'] elif action == 'resetNetwork': assert body[action] is None elif action == 'addSecurityGroup': assert 
list(body[action]) == ['name'] elif action == 'removeSecurityGroup': assert list(body[action]) == ['name'] elif action == 'createBackup': assert set(body[action]) == set(['name', 'backup_type', 'rotation']) elif action == 'evacuate': keys = list(body[action]) if 'adminPass' in keys: keys.remove('adminPass') assert set(keys) == set(['host', 'onSharedStorage']) else: raise AssertionError("Unexpected server action: %s" % action) return resp, headers, jsonutils.dumps({'server': _body}) class V3(Base): def setUp(self): super(V3, self).setUp() get_interfaces = { "interface_attachments": [ { "port_state": "ACTIVE", "net_id": "net-id-1", "port_id": "port-id-1", "mac_address": "aa:bb:cc:dd:ee:ff", "fixed_ips": [{"ip_address": "1.2.3.4"}], }, { "port_state": "ACTIVE", "net_id": "net-id-1", "port_id": "port-id-1", "mac_address": "aa:bb:cc:dd:ee:ff", "fixed_ips": [{"ip_address": "172.16.58.3"}], } ] } httpretty.register_uri(httpretty.GET, self.url('1234', 'os-attach-interfaces'), body=jsonutils.dumps(get_interfaces), content_type='application/json') attach_body = {'interface_attachment': {}} httpretty.register_uri(httpretty.POST, self.url('1234', 'os-attach-interfaces'), body=jsonutils.dumps(attach_body), content_type='application/json') httpretty.register_uri(httpretty.GET, self.url('1234', 'os-server-diagnostics'), body=self.diagnostic, status=200) httpretty.register_uri(httpretty.DELETE, self.url('1234', 'os-attach-interfaces', 'port-id')) httpretty.register_uri(httpretty.GET, self.url(1234, 'os-server-password'), jsonutils.dumps({'password': ''})) def post_servers(self, request, url, headers): body = jsonutils.loads(request.body.decode('utf-8')) assert set(body.keys()) <= set(['server']) fakes.assert_has_keys(body['server'], required=['name', 'image_ref', 'flavor_ref'], optional=['metadata', 'personality', 'os-scheduler-hints:scheduler_hints']) if body['server']['name']
known_toposort) # add a cyclic dependency, jacket to undershorts myjob.add_deps(undershorts.id, jacket.id) # no exceptions raised, but result None self.assertEqual(myjob.validate('job_1'), None) def testJobGraphFailing(self): s = Scheduler(self.db) myjob = JobGraph(self.db, 'job_1') fname = 'foo' # We have a few items to wear, and there's an "order" to respect... # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks # Now, we can't put on the tie without wearing the shirt first, etc... watch = s.queue_task(fname, task_name='watch') jacket = s.queue_task(fname, task_name='jacket') shirt = s.queue_task(fname, task_name='shirt') tie = s.queue_task(fname, task_name='tie') pants = s.queue_task(fname, task_name='pants') undershorts = s.queue_task(fname, task_name='undershorts') belt = s.queue_task(fname, task_name='belt') shoes = s.queue_task(fname, task_name='shoes') socks = s.queue_task(fname, task_name='socks') # before the tie, comes the shirt myjob.add_deps(tie.id, shirt.id) # before the belt too comes the shirt myjob.add_deps(belt.id, shirt.id) # before the jacket, comes the tie myjob.add_deps(jacket.id, tie.id) # before the belt, come the pants myjob.add_deps(belt.id, pants.id) # before the shoes, comes the pants myjob.add_deps(shoes.id, pants.id) # before the pants, comes the undershorts myjob.add_deps(pants.id, undershorts.id) # before the shoes, comes the undershorts myjob.add_deps(shoes.id, undershorts.id) # before the jacket, comes the belt myjob.add_deps(jacket.id, belt.id) # before the shoes, comes the socks myjob.add_deps(shoes.id, socks.id) # add a cyclic dependency, jacket to undershorts myjob.add_deps(undershorts.id, jacket.id) # no exceptions raised, but result None self.assertEqual(myjob.validate('job_1'), None) # and no deps added deps_inserted = self.db(self.db.scheduler_task_deps.id>0).count() self.assertEqual(deps_inserted, 0) def testJobGraphDifferentJobs(self): s = Scheduler(self.db) myjob1 = JobGraph(self.db, 'job_1') 
myjob2 = JobGraph(self.db, 'job_2') fname = 'foo' # We have a few items to wear, and there's an "order" to respect... # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks # Now, we can't put on the tie without wearing the shirt first, etc... watch = s.queue_task(fname, task_name='watch') jacket = s.queue_task(fname, task_name='jacket') shirt = s.queue_task(fname, task_name='shirt') tie = s.queue_task(fname, task_name='tie') pants = s.queue_task(fname, task_name='pants') undershorts = s.queue_task(fname, task_name='undershorts') belt = s.queue_task(fname, task_name='belt') shoes = s.queue_task(fname, task_name='shoes') socks = s.queue_task(fname, task_name='socks') # before the tie, comes the shirt myjob1.add_deps(tie.id, shirt.id) # before the belt too comes the shirt myjob1.add_deps(belt.id, shirt.id) # before the jacket, comes the tie myjob1.add_deps(jacket.id, tie.id) # before the belt, come the pants myjob1.add_deps(belt.id, pants.id) # before the shoes, comes the pants myjob2.add_deps(shoes.id, pants.id) # before the pants, comes the undershorts myjob2.add_deps(pants.id, undershorts.id) # before the shoes, comes the undershorts myjob2.add_deps(shoes.id, undershorts.id) # before the jacket, comes the belt myjob2.add_deps(jacket.id, belt.id) # before the shoes, comes the socks myjob2.add_deps(shoes.id, socks.id) # every job by itself can be completed self.assertNotEqual(myjob1.validate('job_1'), None) self.assertNotEqual(myjob1.validate('job_2'), None) # and, implicitly, every queued task can be too self.assertNotEqual(myjob1.validate(), None) # add a cyclic dependency, jacket to undershorts myjob2.add_deps(undershorts.id, jacket.id) # every job can still be completed by itself self.assertNotEqual(myjob1.validate('job_1'), None) self.assertNotEqual(myjob1.validate('job_2'), None) # but trying to see if every task will ever be completed fails self.assertEqual(myjob2.validate(), None) class TestsForSchedulerAPIs(BaseTestScheduler): def 
testQueue_Task(self): def isnotqueued(result): self.assertEqual(result.id, None) self.assertEqual(result.uuid, None) self.assertEqual(len(result.errors.keys()) > 0, True) def isqueued(result): self.assertNotEqual(result.id, None) self.assertNotEqual(result.uuid, None) self.assertEqual(len(result.errors.keys()), 0) s = Scheduler(self.db) fname = 'foo' watch = s.queue_task(fname, task_name='watch') # queuing a task returns id, errors, uuid self.assertEqual(set(watch.keys()), set(['id', 'uuid', 'errors'])) # queueing nothing isn't allowed self.assertRaises(TypeError, s.queue_task, *[]) # passing pargs and pvars wrongly # # pargs as dict isnotqueued(s.queue_task(fname, dict(a=1), dict(b=1))) # # pvars as list isnotqueued(s.queue_task(fname, ['foo', 'bar'], ['foo', 'bar'])) # two tasks with the same uuid won't be there isqueued(s.queue_task(fname, uuid='a')) isnotqueued(s.queue_task(fname, uuid='a')) # # #FIXME add here every parameter def testTask_Status(self): s = Scheduler(self.db) fname = 'foo' watch = s.queue_task(fname, task_name='watch') # fetch status by id by_id = s.task_status(watch.id) # fetch status by uuid by_uuid = s.task_status(watch.uuid) # fetch status by query by_query = s.task_status(self.db.scheduler_task.function_name == 'foo') self.assertEqual(by_id, by_uuid) self.assertEqual(by_id, by_query) # fetch status by anything else throws self.assertRaises(SyntaxError, s.task_status, *[[1, 2]]) # adding output returns the joined set, plus "result" rtn = s.task_status(watch.id, output=True) self.assertEqual(set(rtn.keys()), set(['scheduler_run', 'scheduler_task', 'result'])) class testForSchedulerRunnerBase(BaseTestScheduler): def inner_teardown(self): from gluon import current fdest = os.path.join(current.request.folder, 'models', 'scheduler.py') os.unlink(fdest) additional_files = [ os.path.join(current.request.folder, 'private', 'demo8.pholder') ] for f in additional_files: try: os.unlink(f) except: pass def writefunction(self, content, initlines=None): 
from gluon import current fdest = os.path.join(current.request.folder, 'models', 'scheduler.py') if initlines is None: initlines = """ import os import time from gluon.scheduler import Scheduler db_dal = os.path.abspath(os.path.join(request.folder, '..', '..', 'dummy2.db')) sched_dal = DAL('sqlite://%s' % db_dal, folder=os.path.dirname(db_dal)) sched = Scheduler(sched_dal, max_empty_runs=15, migrate=False, heartbeat=1) """ with open(fdest, 'w') as q: q.write(initlines) q.write(content) def exec_sched(self): import subprocess call_args = [sys.executable, 'web2py.py', '--no-banner', '-D', '20','-K', 'welcome'] ret = subprocess.call(call_args, env=dict(os.environ)) return ret def fetch_results(self, sched, task): info = sched.task_status(task.id) task_runs = self.db(self.db.scheduler_run.task_id == task.id).select() return info, task_runs def exec_asserts(self, stmts, tag): for stmt in stmts: self.assertEqual(stmt[1], True, msg="%s - %s" % (tag, stmt[0])) class TestsForSchedulerRunner(testForSchedulerRunnerBase): def testRepeats_and_Expired_and_Prio(self): s = Scheduler(self.db) repeats = s.queue_task('demo1', ['a', 'b'], dict(c=1, d=2), repeats=2, period=5) a_while_ago = datetime.datetime.now() - datetime.timedelta(seconds=60) expired = s.queue_task('demo4', stop_time=a_while_ago) prio1 = s.queue_task('demo1', ['scheduled_first']) prio2 = s.queue_task('demo1', ['scheduled_second'], next_run_time=a_while_ago) self.db.commit() self.writefunction(r""" def demo1(*args,**vars): print('you passed args=%s and vars=%s' % (args, vars)) return args[0] def demo4(): time.sleep(15) print("I'm printing something") return dict(a=1, b=2) """) ret = self.exec_sched() self.assertEqual(ret, 0) # repeats check task, task_run = self.fetch_results(s, repeats) res = [ ("task status completed", task.status == 'COMPLETED'), ("task times_run is 2", task.times_run == 2), ("task ran 2 times only", len(task_run) == 2), ("scheduler_run records are COMPLETED ", (task_run[0].status == 
task_run[1].status == 'COMPLETED')), ("period is respected", (task_run[1].start_time > task_run[0].start_time + datetime.timedelta(seconds=task.period))) ] self.exec_asserts(res, 'REPEATS') # expired check task, task_run = self.fetch_results(s, expired) res = [ ("task status expired", task.status == 'EXPIRED'), ("task times_run is 0", task.times_run == 0), ("task didn't run at all", len(task_run) == 0) ] self.exec_asserts(res, 'EXPIRATION') # prio check task1 = s.task_status(prio1.id, output=True) task2 = s.task_status(prio2.id, output=True) res = [ ("tasks status completed", task1.scheduler_task.status == task2.scheduler_task.status == 'COMPLETED'), ("priority2 was executed before priority1" , task1.scheduler_run.id > task2.scheduler_run.id) ] self.exec_asserts(res, 'PRIORITY') def testNoReturn_and_Timeout_and_Progress(self): s = Scheduler(self.db) noret1 = s.queue_task('demo5') noret2 = s.queue_task('demo3') timeout1 = s.queue_task('demo4', timeout=5) timeout2 = s.queue_task('demo4') progress = s.queue_task('demo6', sync_output=2) self.db.commit() self.writefunction(r""" def demo3(): time.sleep(15) print(1/0) return None def demo4(): time.sleep(15) print("I'm printing something") return dict(a=1, b=2) def demo5(): time.sleep(15) print("I'm printing something") rtn = dict(a=1, b=2) def demo6(): time.sleep(5) print('50%') time.sleep(5) print('!clear!100%') return 1 """) ret = self.exec_sched() self.assertEqual(ret, 0) # noreturn check task1, task_run1 = self.fetch_results(s, noret1) task2, task_run2 = self.fetch_results(s, noret2) res = [ ("tasks no_returns1 completed", task1.status == 'COMPLETED'), ("tasks no_returns2 failed", task2.status == 'FAILED'), ("no_returns1 doesn't have a scheduler_run record", len(task_run1) == 0), ("no_returns2 has a scheduler_run record FAILED", (len(task_run2) == 1 and task_run2[0].status == 'FAILED')), ] self.exec_asserts(res, 'NO_RETURN') # timeout check task1 = s.task_status(timeout1.id, output=True) task2 = 
s.task_status(timeout2.id, output=True) res = [ ("tasks timeouts1 timeoutted", task1.scheduler_task.status == 'TIMEOUT'), ("tasks timeouts2 completed", task2.scheduler_task.status == 'COMPLETED') ] self.exec_asserts(res, 'TIMEOUT') # progress check task1 = s.task_status(progress.id, output=True) res = [ ("tasks percentages completed", task1.scheduler_task.status == 'COMPLETED'), ("output contains only 100%", task1.scheduler_run.run_output.strip() == "100%") ] self.exec_asserts(res, 'PROGRESS') def testDrift_and_env_and_immediate(self): s = Scheduler(self.db) immediate = s.queue_task('demo1', ['a', 'b'], dict(c=1, d=2), immediate=True) env = s.queue_task('demo7') drift = s.queue_task('demo1', ['a', 'b'], dict(c=1, d=2), period=93, prevent_drift=True) self.db.commit() self.writefunction(r""" def demo1(*args,**vars): print('you passed args=%s and vars=%s' % (args, vars)) return args[0] import random def demo7(): time.sleep(random.randint(1,5)) print(W2P_TASK, request.now) return W2P_TASK.id, W2P_TASK.uuid, W2P_TASK.run_id """) ret = self.exec_sched() self.assertEqual(ret, 0) # immediate check, can only check that nothing breaks task1 = s.task_status(immediate.id) res = [ ("tasks status completed", task1.status == 'COMPLETED'), ] self.exec_asserts(res, 'IMMEDIATE') # drift check task, task_run = self.fetch_results(s, drift) res = [ ("task status completed", task.status == 'COMPLETED'), ("next_run_time is exactly start_time + period", (task.next_run_time == task.start_time + datetime.timedelta(seconds=task.period))) ] self.exec_asserts(res, 'DRIFT') # env check task1 = s.task_status(env.id, output=True) res = [ ("task %s returned W2P_TASK correctly" % (task1.scheduler_task.id), task1.result == [task1.scheduler_task.id, task1.scheduler_task.uuid, task1.scheduler_run.id]), ] self.exec_asserts(res, 'ENV') def testRetryFailed(self): s = Scheduler(self.db) failed = s.queue_task('demo2', retry_failed=1, period=1) failed_consecutive = s.queue_task('demo8', retry_failed=2, 
repeats=2, period=1) self.db.commit() self.writefunction(r""" def demo2(): 1/0 def demo8(): placeholder = os.path.join(request.folder, 'private', 'demo8.pholder') with open(placeholder, 'a') as g: g.write('\nplaceholder for demo8 created') num_of_lines = 0 with open(placeholder) as f: num_of_lines = len([a for a in f.read().split('\n') if a]) print('number of lines', num_of_lines) if
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 10 13:31:55 2017

@author: Astrid
"""
import math as m
import pandas as pd


def auto_disc_calc_grid(steps, min_grid, max_grid, detail, stretch_factor):
    """Automatically discretize a list of layer widths into graded 1-D grids.

    For each layer width in ``steps`` a grid is built that starts fine at both
    layer boundaries and coarsens toward the middle, growing by at most
    ``stretch_factor`` per cell.

    Parameters
    ----------
    steps : list of float
        Widths of the layers to discretize.
    min_grid : float
        User-fixed boundary cell size; 0 means "derive from ``detail``".
    max_grid : float
        Upper bound on any cell size; 0 means unbounded.
    detail : int
        Discretization detail (1..100); used when ``stretch_factor`` or
        ``min_grid`` is 0.
    stretch_factor : float
        Maximum ratio between neighboring cells; 0 means "derive from
        ``detail``".

    Returns
    -------
    list of list of float
        One list of cell widths per entry of ``steps``; each inner list sums
        to the corresponding layer width (up to float rounding).
    """
    new_grid = list()
    total_width = sum(steps)
    # Heuristic minimum cell width when the user did not fix min_grid:
    # higher detail -> smaller boundary cells.
    ideal_min_width = total_width/(20 + 2*detail)

    # Map detail onto a stretch factor when none was given (quadratic):
    # detail == 1   --> stretch = 1.8
    # detail == 100 --> stretch = 1.04
    if stretch_factor == 0:
        MIN_DETAIL = 1
        MAX_DETAIL = 100
        MAX_STRETCH = 1.8
        MIN_STRETCH = 1.04
        b = (MAX_STRETCH - MIN_STRETCH)/((MIN_DETAIL - MAX_DETAIL)*(MIN_DETAIL - MAX_DETAIL))
        stretch_factor = b*(detail - MAX_DETAIL)*(detail - MAX_DETAIL) + MIN_STRETCH

    # First determine the boundary (left/right) cell size of every layer.
    left_side_dx = list()
    right_side_dx = list()
    for i in range(len(steps)):
        # Did the user fix the grid sides?
        if min_grid != 0:
            s = min_grid
        else:
            s = ideal_min_width
        # A layer must hold at least 3 cells; shrink the sides if needed.
        if s*3 > steps[i]:
            s = steps[i]/3
        left_side_dx.append(s)
        right_side_dx.append(s)

    # At every interior boundary, cap the size jump between the two
    # neighboring boundary cells to the stretch factor.
    for i in range(len(steps) - 1):
        if right_side_dx[i] < left_side_dx[i+1]:
            ratio = left_side_dx[i+1]/right_side_dx[i]
            if ratio > stretch_factor:
                left_side_dx[i+1] = right_side_dx[i]*stretch_factor
        else:
            ratio = right_side_dx[i]/left_side_dx[i+1]
            if ratio > stretch_factor:
                right_side_dx[i] = left_side_dx[i+1]*stretch_factor

    # Now for the actual discretization loop, one layer at a time.
    for i in range(len(steps)):
        effective_max_grid = max_grid
        # If max_grid is already smaller than the first stretched cell on
        # either side it cannot be honored -> ignore it for this layer.
        if effective_max_grid != 0:
            if (effective_max_grid < left_side_dx[i]*stretch_factor
                    or effective_max_grid < right_side_dx[i]*stretch_factor):
                effective_max_grid = 0

        # Iterative simple approach: grow from both sides toward the middle.
        grid_from_left = list()
        grid_from_right = list()
        grid_from_left.append(left_side_dx[i])
        grid_from_right.append(right_side_dx[i])
        space_left = steps[i] - grid_from_left[0] - grid_from_right[0]

        # Special case: so little space remains that a 3-element layer is all
        # we can do -- then this layer is already done.
        if space_left < steps[i]/3*stretch_factor:
            new_steps = list((grid_from_left[0], space_left, grid_from_right[0]))
            new_grid.append(new_steps)
            continue

        # Iteratively add grid to both sides until the space is filled.
        # `iterations` is a safety bound against non-convergence.
        # BUGFIX: was `while space_left > 1e-8 or iterations == 0`, which made
        # the counter inert (loop unbounded if the space never converges, plus
        # one spurious pass when the counter hit 0). The counter must STOP the
        # loop, not keep it alive.
        iterations = 1000
        while space_left > 1e-8 and iterations > 0:
            # Special case: same size on both sides -> grow symmetrically.
            if grid_from_left[-1] == grid_from_right[-1]:
                new_dx = grid_from_left[-1]*stretch_factor
                if new_dx*2 < space_left and (effective_max_grid == 0 or new_dx <= effective_max_grid):
                    grid_from_left.append(new_dx)
                    grid_from_right.append(new_dx)
                    space_left = space_left - 2*new_dx
            # Only discretize on the left if its cells are the smaller ones.
            if grid_from_left[-1] < grid_from_right[-1]:
                new_dx = grid_from_left[-1]*stretch_factor
                if new_dx < space_left and (effective_max_grid == 0 or new_dx <= effective_max_grid):
                    grid_from_left.append(new_dx)
                    space_left = space_left - new_dx
                # otherwise we don't add and wait what happens below
            # Only discretize on the right if its cells are the smaller ones;
            # here the right side gets the chance to 'catch up' with the left.
            if grid_from_left[-1] > grid_from_right[-1]:
                new_dx = grid_from_right[-1]*stretch_factor
                if new_dx < space_left and (effective_max_grid == 0 or new_dx <= effective_max_grid):
                    grid_from_right.append(new_dx)
                    space_left = space_left - new_dx

            # Recompute remaining space from scratch (avoids累 drift is wrong --
            # avoids accumulation drift from repeated subtraction).
            space_left = steps[i] - sum(grid_from_left) - sum(grid_from_right)

            # Now we have to deal with several cases:
            # 1. space_left < 0 : overshot, have to step back a bit
            # 2. space_left > 1e-8 but smaller than the next stretched cells
            # 3. space_left > 1e-8 and enough space -> simply continue meshing
            # 3.1 could continue but maximum grid size reached on both sides

            # Case 1 : overshot, shouldn't happen
            if space_left < -1e-8:
                print('overshot!')

            # Smallest cell either side would add next.
            needed_space = min([grid_from_left[-1]*stretch_factor,
                                grid_from_right[-1]*stretch_factor])
            if space_left < needed_space:
                not_enough_space = True
            else:
                not_enough_space = False
            if grid_from_left[-1] == grid_from_right[-1]:
                # Symmetric growth needs room for two cells.
                if space_left < needed_space*2:
                    not_enough_space = True
                else:
                    not_enough_space = False

            # Case 2 : not enough space for further meshing (most critical):
            # distribute the innermost two cells + space_left over 3 or 4
            # cells depending on the stretch factor.
            if space_left > 1e-8 and not_enough_space:
                available_space = space_left + grid_from_left[-1] + grid_from_right[-1]
                # now check if three cells would work
                dx = available_space/3
                if dx < grid_from_left[-1] and dx < grid_from_right[-1]:
                    # ok, proposal accepted
                    grid_from_left[-1] = dx
                    grid_from_left.append(dx)
                    grid_from_right[-1] = dx
                else:
                    dx = available_space/4
                    grid_from_left[-1] = dx
                    grid_from_left.append(dx)
                    grid_from_right[-1] = dx
                    grid_from_right.append(dx)
                space_left = 0
                continue  # done with this layer

            # Case 3 : all good, go on with meshing.
            # Case 3.1 : maximum grid size reached -- distribute the remaining
            # space into equidistant steps < effective_max_grid.
            if effective_max_grid != 0 and needed_space > effective_max_grid:
                n = m.ceil(space_left/effective_max_grid)
                dx = space_left/n
                for j in range(n):
                    grid_from_left.append(dx)
                space_left = 0  # this will stop the iteration now

            iterations = iterations - 1

        # Adjust the biggest element of grid_from_left such that the sum of
        # all elements equals steps[i] exactly.
        dx_adjust = steps[i] - sum(grid_from_left) - sum(grid_from_right)
        grid_from_left[-1] = (grid_from_left[-1] + dx_adjust)

        # Create the new steps vector that contains the complete grid.
        grid_from_right.reverse()
        new_steps = grid_from_left + grid_from_right
        # Finally! add the new steps
        new_grid.append(new_steps)

    return new_grid


def adjust_ranges(selection_range, old_range, new_range, x_direction):
    """Shift/resize `selection_range` after `old_range` became `new_range`.

    Ranges are 4-element [left, top, right, bottom] index sequences; the
    selection is adjusted along x when ``x_direction`` is truthy, else along y.
    NOTE(review): SOURCE for this function is truncated -- the tail of the
    y-direction branch is missing below; code is reproduced as found.
    """
    # Differences in widths/heights -> how far layers right/below the
    # modified range have to move.
    x_move = (new_range[2] - new_range[0]) - (old_range[2] - old_range[0])
    y_move = (new_range[3] - new_range[1]) - (old_range[3] - old_range[1])
    # x-direction
    if x_direction:
        # if selection is completely to the left of new selection, nothing has to be done
        if selection_range[2] < old_range[0]:
            return selection_range
        # if selection is completely to the right of the new selection, move all points
        if selection_range[0] > old_range[2]:
            selection_range[0] = selection_range[0] + x_move
            selection_range[2] = selection_range[2] + x_move
        else:
            # Now we have a few options:
            # 1. the selection is completely part of the modified range (oldRange)
            # 2. the modified range (oldRange) is completely part of the selection
            # 3. the right side of the selection is modified
            # 4. the left side of the selection is modified

            # (1) If this selection was entirely part of the selected range
            # m_removed, it will become as big as the new range m_inserted
            # NOTE: oldRange(1) is always == oldRange(3) because of input
            # parameters in auto_discretize_x (index, index)
            if selection_range[0] >= old_range[0] and selection_range[2] <= old_range[2]:
                selection_range[0] = new_range[0]
                selection_range[2] = new_range[2]
                # NOTE: if we cut out a section, left will be > than right,
                # allowing us to remove deleted assignments later on
            # (2) If the selected range m_removed was a part of this selection,
            # we only need to move the right boundary
            elif selection_range[0] <= old_range[0] and selection_range[2] >= old_range[2]:
                selection_range[2] = selection_range[2] + x_move
            # (3) and (4) are only valid if the new range is cut out,
            # i.e. if (newRange(3) - newRange(1)) < 0
            else:
                # Check for option (3)
                if selection_range[0] < old_range[0] and selection_range[2] <= old_range[2]:
                    selection_range[2] = old_range[0] - 1
                # otherwise option (4)
                else:
                    selection_range[0] = old_range[0]
                    # NOTE(review): writes index 1 (the top/y coordinate)
                    # inside the x-direction branch -- looks like it should be
                    # index 2 (right). Reproduced as found; confirm against
                    # the original implementation.
                    selection_range[1] = selection_range[2] + x_move
    # y-direction
    else:
        # if selection is completely to the top of new selection, nothing has to be done
        if selection_range[3] < old_range[1]:
            return selection_range
        # if selection is completely to the bottom of the new selection, move all points
        if selection_range[1] > old_range[3]:
            selection_range[1] = selection_range[1] + y_move
            selection_range[3] = selection_range[3] + y_move
        else:
            # Now we have a few options:
            # 1. the selection is completely part of the modified range (m_removed)
            # 2. the modified range (m_removed) is completely part of the selection
            # 3. the bottom side of the selection is modified
            # 4. the top side of the selection is modified
            # (1) If this selection was entirely part of the selected range m_removed,
            # NOTE(review): SOURCE is truncated here; the remainder of the
            # y-direction handling (mirroring the x branch above) is missing.
            pass
<filename>ultracart/api/order_api.py # coding: utf-8 """ UltraCart Rest API V2 UltraCart REST API Version 2 # noqa: E501 OpenAPI spec version: 2.0.0 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from ultracart.api_client import ApiClient from ultracart.configuration import Configuration class OrderApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client @classmethod def fromApiKey(cls, apiKey, verify_ssl = True, debug = False): config = Configuration() config.api_key['x-ultracart-simple-key'] = apiKey config.debug = debug config.verify_ssl = verify_ssl api_client = ApiClient(configuration=config, header_name='X-UltraCart-Api-Version', header_value='2017-03-01') return OrderApi(api_client) def adjust_order_total(self, order_id, desired_total, **kwargs): # noqa: E501 """Adjusts an order total # noqa: E501 Adjusts an order total. Adjusts individual items appropriately and considers taxes. Desired total should be provided in the same currency as the order and must be less than the current total and greater than zero. This call will change the order total. It returns true if the desired total is achieved. If the goal seeking algorithm falls short (usually by pennies), this method returns back false. View the merchant notes for the order for further details. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.adjust_order_total(order_id, desired_total, async_req=True) >>> result = thread.get() :param async_req bool :param str order_id: The order id to cancel. 
(required) :param str desired_total: The desired total with no formatting. example 123.45 (required) :return: BaseResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.adjust_order_total_with_http_info(order_id, desired_total, **kwargs) # noqa: E501 else: (data) = self.adjust_order_total_with_http_info(order_id, desired_total, **kwargs) # noqa: E501 return data def adjust_order_total_with_http_info(self, order_id, desired_total, **kwargs): # noqa: E501 """Adjusts an order total # noqa: E501 Adjusts an order total. Adjusts individual items appropriately and considers taxes. Desired total should be provided in the same currency as the order and must be less than the current total and greater than zero. This call will change the order total. It returns true if the desired total is achieved. If the goal seeking algorithm falls short (usually by pennies), this method returns back false. View the merchant notes for the order for further details. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.adjust_order_total_with_http_info(order_id, desired_total, async_req=True) >>> result = thread.get() :param async_req bool :param str order_id: The order id to cancel. (required) :param str desired_total: The desired total with no formatting. example 123.45 (required) :return: BaseResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['order_id', 'desired_total'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method adjust_order_total" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'order_id' is set if ('order_id' not in params or params['order_id'] is None): raise ValueError("Missing the required parameter `order_id` when calling `adjust_order_total`") # noqa: E501 # verify the required parameter 'desired_total' is set if ('desired_total' not in params or params['desired_total'] is None): raise ValueError("Missing the required parameter `desired_total` when calling `adjust_order_total`") # noqa: E501 collection_formats = {} path_params = {} if 'order_id' in params: path_params['order_id'] = params['order_id'] # noqa: E501 if 'desired_total' in params: path_params['desired_total'] = params['desired_total'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501 return self.api_client.call_api( '/order/orders/{order_id}/adjust_order_total/{desired_total}', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='BaseResponse', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), 
_preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def cancel_order(self, order_id, **kwargs): # noqa: E501 """Cancel an order # noqa: E501 Cancel an order on the UltraCart account. If the success flag is false, then consult the error message for why it failed. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.cancel_order(order_id, async_req=True) >>> result = thread.get() :param async_req bool :param str order_id: The order id to cancel. (required) :return: BaseResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.cancel_order_with_http_info(order_id, **kwargs) # noqa: E501 else: (data) = self.cancel_order_with_http_info(order_id, **kwargs) # noqa: E501 return data def cancel_order_with_http_info(self, order_id, **kwargs): # noqa: E501 """Cancel an order # noqa: E501 Cancel an order on the UltraCart account. If the success flag is false, then consult the error message for why it failed. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.cancel_order_with_http_info(order_id, async_req=True) >>> result = thread.get() :param async_req bool :param str order_id: The order id to cancel. (required) :return: BaseResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['order_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method cancel_order" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'order_id' is set if ('order_id' not in params or params['order_id'] is None): raise ValueError("Missing the required parameter `order_id` when calling `cancel_order`") # noqa: E501 collection_formats = {} path_params = {} if 'order_id' in params: path_params['order_id'] = params['order_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501 return self.api_client.call_api( '/order/orders/{order_id}/cancel', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='BaseResponse', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_order(self, order_id, **kwargs): # noqa: E501 """Delete an order # noqa: E501 Delete an order on the UltraCart account. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_order(order_id, async_req=True) >>> result = thread.get() :param async_req bool :param str order_id: The order id to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_order_with_http_info(order_id, **kwargs) # noqa: E501 else: (data) = self.delete_order_with_http_info(order_id, **kwargs) # noqa: E501 return data def delete_order_with_http_info(self, order_id, **kwargs): # noqa: E501 """Delete an order # noqa: E501 Delete an order on the UltraCart account. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_order_with_http_info(order_id, async_req=True) >>> result = thread.get() :param async_req bool :param str order_id: The order id to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['order_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_order" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'order_id' is set if ('order_id' not in params or params['order_id'] is None): raise ValueError("Missing the required parameter `order_id` when calling `delete_order`") # noqa: E501 collection_formats = {} path_params = {} if 'order_id' in params: path_params['order_id'] = params['order_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {}
<reponame>reepoi/ahj-registry import csv import datetime from django.core.checks import messages from django.forms import formset_factory from django.http import HttpResponseRedirect, HttpResponse from django.shortcuts import render from django.utils import timezone from .form import UserResetPasswordForm, UserDeleteToggleAPITokenForm, EditApproveForm, UserGenerateAPITokenForm from ..models import User, APIToken, Edit, AHJUserMaintains, Comment from ..usf import dict_filter_keys_start_with, ENUM_FIELDS from ..views_edits import apply_edits, reset_edit, edit_is_resettable, revert_edit def get_value_or_primary_key(obj, field): """ Retrieves the value of a field from an object. If the value is None, empty string is returned. If the field is an enum field, its value is returned. If the field is a related field, the its primary key is returned. """ value = getattr(obj, field) field_class_name = obj._meta.get_field(field).__class__.__name__ if value is None: value = '' elif field in ENUM_FIELDS: value = value.Value elif field_class_name == 'ForeignKey' or field_class_name == 'OneToOneField': value = value.pk return value class ExportCSVMixin: """ Mixin to for an admin model to inherit to add an export_csv admin action. """ def export_csv(self, request, queryset): """ Returns a CSV file exporting all the rows in the queryset. """ meta = self.model._meta field_names = [field.name for field in meta.fields] response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = f'attachment; filename={timezone.now()}_{self.model.__name__}_table.csv' writer = csv.writer(response) writer.writerow(field_names) for obj in queryset: writer.writerow([get_value_or_primary_key(obj, field) for field in field_names]) return response export_csv.short_description = 'Export CSV' def reset_password(user, raw_password): """ Sets and saves a user's password. 
""" user.set_password(<PASSWORD>) user.save() def user_reset_password(self, request, queryset): """ Admin action for the User model. The admin can set a new password for one user. The new password is hashed, and then saved. """ if 'apply' in request.POST: """ The form has been filled out and submitted. """ password = request.POST['password'] user_id = request.POST['_selected_action'] user = User.objects.get(UserID=user_id) reset_password(user, password) self.message_user(request, 'Success', level=messages.INFO) return HttpResponseRedirect(request.get_full_path()) if queryset.count() > 1: """ Only support setting the password for one user at a time. """ self.message_user(request, 'Please select one user when running this action.', level=messages.ERROR) return HttpResponseRedirect(request.get_full_path()) form = UserResetPasswordForm() return render(request, 'admin/user_reset_password.html', context={ 'request': request, 'user': queryset.first(), 'form': form }) user_reset_password.short_description = 'Reset password' def partition_by_field(queryset, field, value): """ Returns two querysets from the queryset: - queryset of rows whose field value matches the value - queryset of rows whose field value does not match the value """ with_field_value = queryset.filter(**{field: value}) without_field_value = queryset.exclude(**{field: value}) return with_field_value, without_field_value def set_date_from_str(date_str): """ Returns a date object from a string formatted in ``%Y-%m-%d``. """ try: return timezone.make_aware(datetime.datetime.strptime(date_str, '%Y-%m-%d')) except ValueError: return None def process_generate_api_token_data(post_data): """ This expects the post_data to contain an array called ``user_to_form``. Each item in this array is of the form: .. code-block:: python '<UserID>.<form_prefix>' (i.e. '1.form-0') Each form then may add two form data key-value pairs: .. code-block:: python '<form_prefix>-expiration_date': '<date>' (i.e. 
'form-0-expiration_date': '2021-06-04') """ user_to_form_pairs = [pair.split('.') for pair in post_data.getlist('user_to_form')] user_form_data = [] for user_id, form_prefix in user_to_form_pairs: user = User.objects.get(UserID=user_id) form_data = dict_filter_keys_start_with(form_prefix, post_data) date_str = '-'.join([form_data.get('ExpirationDate_year', ''), form_data.get('ExpirationDate_month', ''), form_data.get('ExpirationDate_day', '')]) expiration_date = set_date_from_str(date_str=date_str) user_form_data.append({'user': user, 'expires': expiration_date}) return user_form_data def user_generate_api_token(self, request, queryset): """ Admin action for the User model. The admin can select one or more users and generate an API token for them. If selected users already have an API token, a new API will not be generated for them. """ if 'apply' in request.POST: """ The form has been filled out and submitted. """ action_data = process_generate_api_token_data(request.POST) for item in action_data: APIToken.objects.create(user=item['user'], expires=item['expires']) self.message_user(request, 'Success', level=messages.INFO) return HttpResponseRedirect(request.get_full_path()) users_without_tokens, users_with_tokens = partition_by_field(queryset, 'api_token', None) users_with_tokens = users_with_tokens.order_by('Email') users_without_tokens = users_without_tokens.order_by('Email') formset = formset_factory(UserGenerateAPITokenForm, extra=queryset.count())() return render(request, 'admin/user_generate_api_token.html', context={ 'request': request, 'users_without_tokens': users_without_tokens, 'users_without_tokens_and_forms': zip(users_without_tokens, formset), 'user_token_tuples': zip(users_with_tokens, users_with_tokens.values_list('api_token', flat=True)) }) user_generate_api_token.short_description = 'Generate API token' def delete_toggle_api_token(user, toggle=None, delete=False): """ Modifies a user's API token by either deleting it or toggling it on/off. 
""" if not APIToken.objects.filter(user=user): return if delete: user.api_token.delete() return if toggle is not None: user.api_token.is_active = toggle user.api_token.save() def set_toggle(form_value): """ Converts the input values on an HTML dropdown with values ``On, Off, DoNothing`` to boolean values. """ if form_value == 'On': return True elif form_value == 'Off': return False else: return None def set_delete(form_value): """ Used with an HTML checkbox input. Return ``True`` if **form_value** is ``on``. """ if form_value == 'on': return True return False def process_delete_toggle_api_token_data(post_data): """ This expects the post_data to contain an array called ``user_to_form``. Each item in this array is of the form: .. code-block:: python '<UserID>.<form_prefix>' (i.e. '1.form-0') Each form then may add two form data key-value pairs: .. code-block:: python '<form_prefix>-toggle': '<On/Off/DoNothing>' (i.e. 'form-0-toggle': 'On') '<form_prefix>-delete_token': 'on' (i.e. 'form-0-delete_token': 'on') """ user_to_form_pairs = [pair.split('.') for pair in post_data.getlist('user_to_form')] user_form_data = [] for user_id, form_prefix in user_to_form_pairs: user = User.objects.get(UserID=user_id) form_data = dict_filter_keys_start_with(form_prefix, post_data) toggle_api_token = form_data.get('toggle', '') delete_api_token = form_data.get('delete_token', '') user_form_data.append({'user': user, 'toggle': set_toggle(toggle_api_token), 'delete': set_delete(delete_api_token)}) return user_form_data def user_delete_toggle_api_token(self, request, queryset): """ Admin action for the User model. The admin can select one or more users and delete or toggle on/off each user's API token. If selected users do not have an API token, there will be no options displayed for them. """ if 'apply' in request.POST: """ The form has been filled out and submitted. 
""" action_data = process_delete_toggle_api_token_data(request.POST) for item in action_data: delete_toggle_api_token(user=item['user'], toggle=item['toggle'], delete=item['delete']) self.message_user(request, 'Success', level=messages.INFO) return HttpResponseRedirect(request.get_full_path()) users_without_tokens, users_with_tokens = partition_by_field(queryset, 'api_token', None) users_with_tokens = users_with_tokens.order_by('Email') users_without_tokens = users_without_tokens.order_by('Email') formset = formset_factory(UserDeleteToggleAPITokenForm, extra=queryset.count())() return render(request, 'admin/user_delete_toggle_api_token.html', context={ 'request': request, 'users_without_tokens': users_without_tokens, 'users_and_forms': zip(users_with_tokens, formset), 'users_with_tokens': users_with_tokens }) user_delete_toggle_api_token.short_description = 'Delete/Toggle API Token' def build_url_parameters_for_change_list_filtering(queryset, field_key_pairs): """ Builds a URL query of key-value pairs for each field of the form: .. code-block:: xml <field>=<value>,...,<value> (i.e. 'UserID=1,2,3') """ query = '?' for f in field_key_pairs: values = [str(v) for v in queryset.values_list(f['field'], flat=True)] query += f'{f["key"]}={",".join(values)}&' return query def field_key_pair(field, key): return {'field': field, 'key': key} def load_change_list_with_queryset(request, queryset, model_name, field_key_pairs): """ Creates the redirect response to a change list with a url query created from the queryset and field_key_pairs. """ query = build_url_parameters_for_change_list_filtering(queryset, field_key_pairs) return HttpResponseRedirect(f'{request.build_absolute_uri(f"/admin/ahj_app/{model_name}/")}{query}') def user_query_api_tokens(self, request, queryset): """ Admin action for the User model. Redirects the admin to a change list of the selected users' APITokens. Users without APITokens are filtered from the selection. 
""" model_name = 'apitoken' field_key_pairs = [field_key_pair('api_token__user', 'user')] queryset = queryset.exclude(api_token=None) return load_change_list_with_queryset(request, queryset, model_name, field_key_pairs) user_query_api_tokens.short_description = 'Query API Tokens' def user_query_ahjs_is_ahj_official_of(self, request, queryset): """ Admin action for the User model. Redirects the admin to a change list of AHJs the selected users are AHJ officials of. """ model_name = 'ahj' field_key_pairs = [field_key_pair('AHJPK', 'AHJPK')] queryset = AHJUserMaintains.objects.filter(UserID__in=queryset, MaintainerStatus=True) return load_change_list_with_queryset(request, queryset, model_name, field_key_pairs) user_query_ahjs_is_ahj_official_of.short_description = 'Query Is AHJ Official Of' def user_query_submitted_edits(self, request, queryset): """ Admin action for the User model. Redirects the admin to a change list of edits submitted by the selected users. """ model_name = 'edit' field_key_pairs = [field_key_pair('ChangedBy', 'ChangedBy')] queryset = Edit.objects.filter(ChangedBy__in=queryset) return load_change_list_with_queryset(request, queryset, model_name, field_key_pairs) user_query_submitted_edits.short_description = 'Query Submitted Edits' def user_query_approved_edits(self, request, queryset): """ Admin action for the User model. Redirects the admin to a change list of edits approved by the selected users. """ model_name = 'edit' field_key_pairs = [field_key_pair('ApprovedBy', 'ApprovedBy')] queryset = Edit.objects.filter(ApprovedBy__in=queryset) return load_change_list_with_queryset(request, queryset, model_name, field_key_pairs) user_query_approved_edits.short_description = 'Query Approved Edits' def user_query_submitted_comments(self, request, queryset): """ Admin action for the User model. Redirects the admin to a change list of comments submitted by the selected users. 
""" model_name = 'comment' field_key_pairs = [field_key_pair('UserID', 'UserID')] queryset = Comment.objects.filter(UserID__in=queryset) return load_change_list_with_queryset(request, queryset, model_name, field_key_pairs) user_query_submitted_comments.short_description = 'Query Submitted Comments' def process_approve_edits_data(post_data, requesting_user): """ This expects the post_data to contain an array called ``edit_to_form``. Each item in this array is of the form: .. code-block:: python '<EditID>.<form_prefix>' (i.e. '1.form-0') Each form then may add two form data key-value pairs: .. code-block:: python '<form_prefix>-date_effective': '<date>' (i.e. 'form-0-date_effective': '2021-06-04') """ edit_to_form_pairs = [pair.split('.') for pair in post_data.getlist('edit_to_form')] edit_form_data = [] for edit_id, form_prefix in edit_to_form_pairs: edit = Edit.objects.get(EditID=edit_id) form_data = dict_filter_keys_start_with(form_prefix, post_data) date_str = '-'.join([form_data.get('DateEffective_year', ''), form_data.get('DateEffective_month', ''), form_data.get('DateEffective_day', '')]) date_effective = set_date_from_str(date_str=date_str) if date_effective is None: continue apply_now = date_effective.date() <= datetime.date.today() if apply_now: date_effective = timezone.now() edit_form_data.append({'edit': edit, 'approved_by': requesting_user, 'date_effective': date_effective, 'apply_now': apply_now}) return edit_form_data def approve_edit(edit, approved_by, date_effective, apply_now): """ Sets the fields
volume * 1e-8 * 1e-8 * 1e-8 # in cm^3 rn = (1e0 / volume) * (codata_e2_mc2 * 1e2) dspacing = bragg_metrictensor(cryst['a'], cryst['b'], cryst['c'], cryst['alpha'], cryst['beta'], cryst['gamma'], HKL=[hh, kk, ll]) dspacing *= 1e-8 # in cm txt += "# RN = (e^2/(m c^2))/V) [cm^-2], d spacing [cm]\n" txt += "%e %e \n" % (rn, dspacing) output_dictionary["rn"] = rn output_dictionary["dspacing"] = dspacing atom = cryst['atom'] number_of_atoms = len(atom) list_Zatom = [atom[i]['Zatom'] for i in range(len(atom))] list_fraction = [atom[i]['fraction'] for i in range(number_of_atoms)] try: list_charge = [atom[i]['charge'] for i in range(number_of_atoms)] except: list_charge = [0.0] * number_of_atoms list_x = [atom[i]['x'] for i in range(number_of_atoms)] list_y = [atom[i]['y'] for i in range(number_of_atoms)] list_z = [atom[i]['z'] for i in range(number_of_atoms)] # calculate array of temperature factor for all atoms # # Consider anisotropic temperature factor # <NAME>, <EMAIL> # A dummy dictionary Aniso with start =0 if no aniso temperature factor input # start if 'Aniso' in cryst.keys() and cryst['Aniso'][0]['start'] > 0: # most crystals have no Anisotropic input TFac = TemperFactor(1.0 / (2.0 * dspacing * 1e8), cryst['Aniso'], Miller={'h': hh, 'k': kk, 'l': ll}, \ cell={'a': cryst['a'], 'b': cryst['b'], 'c': cryst['c']}, n=len(atom)) B_TFac = 1 else: B_TFac = 0 # # # list_temper = [] list_temper_label = [] if ANISO_SEL == 0: for i in range(number_of_atoms): list_temper.append(temper) list_temper_label.append(-1) elif ANISO_SEL == 1: if B_TFac: for i in range(number_of_atoms): list_temper.append(TFac[0, i]) list_temper_label.append(TFac[2, i]) else: raise Exception("No crystal data to calculate isotropic temperature factor for crystal %s" % descriptor) elif ANISO_SEL == 2: if B_TFac: for i in range(number_of_atoms): list_temper.append(TFac[1, i]) list_temper_label.append(TFac[2, i]) else: raise Exception("No crystal data to calculate anisotropic temperature factor for crystal 
%s" % descriptor) list_AtomicName = [] for i in range(number_of_atoms): s = atomic_symbols()[atom[i]['Zatom']] # if sourceCryst == 1: # charge is not available in xraylib try: # charge is not available in xraylib if atom[i]['charge'] != 0.0: # if charge is 0, s is symbol only, not B0, etc s = s + f'%+.6g' % atom[i]['charge'] except: pass list_AtomicName.append(s) # identify the prototypical atoms labels_prototypical = [] for i in range(number_of_atoms): labels_prototypical.append("Z=%d C=%g F=%g T=%g" % (list_Zatom[i], list_charge[i], list_fraction[i], list_temper_label[i])) if do_not_prototype: indices_prototypical = numpy.arange(number_of_atoms) # different with diff_pat for complex crystal else: indices_prototypical = numpy.unique(labels_prototypical, return_index=True)[1] number_of_prototypical_atoms = len(indices_prototypical) # for i in range(number_of_prototypical_atoms): # print(" >>> ", i, indices_prototypical[i], labels_prototypical[indices_prototypical[i]]) # # for i in indices_prototypical: # print(" >>>>> ", i, labels_prototypical[i]) # # print(">>>> list_labels", len(labels_prototypical), len(indices_prototypical), labels_prototypical) # # get f0 coefficients # # f0coeffs = [] # if sourceF0 == 0: # for i in indices_prototypical: # f0coeffs.append(f0_xop(atom[i]['Zatom'])) # elif sourceF0 == 1: # for i in indices_prototypical: # f0coeffs.append(material_constants_library.f0_with_fractional_charge(atom[i]['Zatom'], atom[i]['charge']) ) # elif sourceF0 == 2: # total_charge_flag = numpy.abs(numpy.array(list_charge)).sum() # note the abs(): to be used as flag... 
# # if total_charge_flag != 0: # Use dabax # for i in indices_prototypical: # f0coeffs.append(material_constants_library.f0_with_fractional_charge(atom[i]['Zatom'], atom[i]['charge'])) # else: # use xraylib # if 'AtomicName' not in atom[0].keys(): # for i in indices_prototypical: #normal case come in here # f0coeffs.append(f0_xop(atom[i]['Zatom'])) # else: #for case with like 'Y3+' entries in f0_xop # import re # for i in indices_prototypical: # x = atom[i]['AtomicName'] # tmp_x = re.search('(^[a-zA-Z]*)',x) # if tmp_x.group(0) == x: # f0coeffs.append(f0_xop(atom[i]['Zatom'])) #neutral atom # else: # f0coeffs.append(f0_xop(0,AtomicName=x)) #charged atom f0coeffs = [] for i in indices_prototypical: try: charge = atom[i]['charge'] except: charge = 0.0 f0coeffs.append(f0_xop_with_fractional_charge(atom[i]['Zatom'], charge)) txt += "# Number of different element-sites in unit cell NBATOM:\n%d \n" % number_of_prototypical_atoms output_dictionary["nbatom"] = number_of_prototypical_atoms txt += "# for each element-site, the number of scattering electrons (Z_i + charge_i)\n" atnum_list = [] for i in indices_prototypical: txt += "%f " % (list_Zatom[i] - list_charge[i]) atnum_list.append(list_Zatom[i] - list_charge[i]) txt += "\n" output_dictionary["atnum"] = atnum_list txt += "# for each element-site, the occupation factor\n" unique_fraction = [list_fraction[i] for i in indices_prototypical] for z in unique_fraction: txt += "%g " % (z) txt += "\n" output_dictionary["fraction"] = unique_fraction txt += "# for each element-site, the temperature factor\n" # temperature parameter unique_temper = [] for i in indices_prototypical: txt += "%g " % list_temper[i] unique_temper.append(list_temper[i]) txt += "\n" output_dictionary["temper"] = unique_temper # # Geometrical part of structure factor: G and G_BAR # txt += "# for each type of element-site, COOR_NR=G_0\n" list_multiplicity = [] for i in indices_prototypical: # zz = list_AtomicName[i] # fraction = list_fraction[i] # temper = 
list_temper[i] # count = 0 # for j in range(len(list_Zatom)): # if (list_AtomicName[j] == zz) and (list_fraction[j] == fraction) and (list_temper[j] == temper): count += 1 if do_not_prototype: txt += "%d " % 1 list_multiplicity.append(1) else: count = 0 for j in range(number_of_atoms): if labels_prototypical[j] == labels_prototypical[i]: count += 1 txt += "%d " % count list_multiplicity.append(count) txt += "\n" output_dictionary["G_0"] = list_multiplicity txt += "# for each type of element-site, G and G_BAR (both complex)\n" list_g = [] list_g_bar = [] for i in indices_prototypical: if do_not_prototype: # # ga_item = numpy.exp(2j * numpy.pi * (hh * list_x[i] + kk * list_y[i] + ll * list_z[i])) # ga += ga_item ga = numpy.exp(2j * numpy.pi * (hh * list_x[i] + kk * list_y[i] + ll * list_z[i])) else: ga = 0.0 + 0j for j in range(number_of_atoms): if labels_prototypical[j] == labels_prototypical[i]: # if list_AtomicName[j] == zz and list_fraction[j] == ff and list_temper[j] == tt: ga_item = numpy.exp(2j * numpy.pi * (hh * list_x[j] + kk * list_y[j] + ll * list_z[j])) ga += ga_item txt += "(%g,%g) \n" % (ga.real, ga.imag) txt += "(%g,%g) \n" % (ga.real, -ga.imag) list_g.append(ga) list_g_bar.append(ga.conjugate()) output_dictionary["G"] = list_g output_dictionary["G_BAR"] = list_g_bar # # F0 part # txt += "# for each type of element-site, the number of f0 coefficients followed by them\n" for f0coeffs_item in f0coeffs: txt += "%d " % len(f0coeffs_item) for cc in f0coeffs_item: txt += "%g " % cc txt += "\n" output_dictionary["f0coeff"] = f0coeffs # <NAME>, use ceil to round up, otherwise we may get actual max energy less than emax npoint = int(numpy.ceil(((emax - emin) / estep + 1))) txt += "# The number of energy points NPOINT: \n" txt += ("%i \n") % npoint output_dictionary["npoint"] = npoint txt += "# for each energy point, energy, F1(1),F2(1),...,F1(nbatom),F2(nbatom)\n" list_energy = [] out_f1 = numpy.zeros((len(indices_prototypical), npoint), dtype=float) out_f2 = 
numpy.zeros((len(indices_prototypical), npoint), dtype=float) out_fcompton = numpy.zeros((len(indices_prototypical), npoint), dtype=float) # todo: is complex? if isinstance(material_constants_library, DabaxXraylib ): # vectorize with DABAX energies = numpy.zeros(npoint) for i in range(npoint): energies[i] = (emin + estep * i) DABAX_F_RESULTS = [] for j, jj in enumerate(indices_prototypical): DABAX_F_RESULTS.append(numpy.array( material_constants_library.FiAndFii(list_Zatom[jj], energies * 1e-3))) for i in range(npoint): energy = (emin + estep * i) txt += ("%20.11e \n") % (energy) list_energy.append(energy) for j, jj in enumerate(indices_prototypical): f1a = (DABAX_F_RESULTS[j])[0, i] # material_constants_library.Fi(list_Zatom[jj], energy * 1e-3) f2a = -(DABAX_F_RESULTS[j])[1, i] # -material_constants_library.Fii(list_Zatom[jj], energy * 1e-3) txt += (" %20.11e %20.11e 1.000 \n") % (f1a, f2a) out_f1[j, i] = f1a out_f2[j, i] = f2a out_fcompton[j, i] = 1.0 else: # make a simple loop with xraylib (fast) for i in range(npoint): energy = (emin + estep * i) txt += ("%20.11e \n") % (energy) list_energy.append(energy) for j,jj in enumerate(indices_prototypical): f1a = material_constants_library.Fi(list_Zatom[jj], energy * 1e-3) f2a = -material_constants_library.Fii(list_Zatom[jj], energy * 1e-3) txt += (" %20.11e %20.11e 1.000 \n") % (f1a, f2a) out_f1[j, i] = f1a out_f2[j, i] = f2a out_fcompton[j, i] = 1.0 output_dictionary["energy"] = list_energy output_dictionary["f1"] = out_f1 output_dictionary["f2"] = out_f2 output_dictionary["fcompton"] = out_fcompton if fileout != None: bragg_preprocessor_file_v2_write(output_dictionary, fileout) # with open(fileout, "w") as f: # f.write(txt) # if verbose: print("File written to disk: %s" % fileout) return output_dictionary # todo: rename def TemperFactor(sinTheta_lambda,anisos,Miller={'h':1,'k':1,'l':1},cell={'a':23.44,'b':23.44,'c':23.44},n=1936): ''' #+ # Singapore Synchrotron Light Source (SSLS) # :Author: <NAME>, <EMAIL> # :Name: 
TemperFactor # :Purpose: Calculates isotropic & anisotropic temperature factors # :Input: # Miller: Miller indices # cell: dictionary of lattice [a,b,c] in units of Angstrom # sinTheta_lambda: Sin(theta)/lambda, lambda in units of
**ClusterIdentifier** *(string) --* The pending or in-progress change of the new identifier for the cluster. - **PubliclyAccessible** *(boolean) --* The pending or in-progress change of the ability to connect to the cluster from the public network. - **EnhancedVpcRouting** *(boolean) --* An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide. If this option is ``true`` , enhanced VPC routing is enabled. Default: false - **MaintenanceTrackName** *(string) --* The name of the maintenance track that the cluster will change to during the next maintenance window. - **EncryptionType** *(string) --* The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy. - **ClusterVersion** *(string) --* The version ID of the Amazon Redshift engine that is running on the cluster. - **AllowVersionUpgrade** *(boolean) --* A boolean value that, if ``true`` , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window. - **NumberOfNodes** *(integer) --* The number of compute nodes in the cluster. - **PubliclyAccessible** *(boolean) --* A boolean value that, if ``true`` , indicates that the cluster can be accessed from a public network. - **Encrypted** *(boolean) --* A boolean value that, if ``true`` , indicates that data in the cluster is encrypted at rest. - **RestoreStatus** *(dict) --* A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot. - **Status** *(string) --* The status of the restore action. Returns starting, restoring, completed, or failed. 
- **CurrentRestoreRateInMegaBytesPerSecond** *(float) --* The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. - **SnapshotSizeInMegaBytes** *(integer) --* The size of the set of snapshot data used to restore the cluster. - **ProgressInMegaBytes** *(integer) --* The number of megabytes that have been transferred from snapshot storage. - **ElapsedTimeInSeconds** *(integer) --* The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. - **EstimatedTimeToCompletionInSeconds** *(integer) --* The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. - **DataTransferProgress** *(dict) --* - **Status** *(string) --* Describes the status of the cluster. While the transfer is in progress the status is ``transferringdata`` . - **CurrentRateInMegaBytesPerSecond** *(float) --* Describes the data transfer rate in megabytes per second. - **TotalDataInMegaBytes** *(integer) --* Describes the total amount of data to be transferred in megabytes. - **DataTransferredInMegaBytes** *(integer) --* Describes the total amount of data that has been transferred in megabytes. - **EstimatedTimeToCompletionInSeconds** *(integer) --* Describes the estimated number of seconds remaining to complete the transfer. - **ElapsedTimeInSeconds** *(integer) --* Describes the number of seconds that have elapsed during the data transfer. - **HsmStatus** *(dict) --* A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command. Values: active, applying - **HsmClientCertificateIdentifier** *(string) --* Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
- **HsmConfigurationIdentifier** *(string) --* Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM. - **Status** *(string) --* Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command. Values: active, applying - **ClusterSnapshotCopyStatus** *(dict) --* A value that returns the destination region and retention period that are configured for cross-region snapshot copy. - **DestinationRegion** *(string) --* The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled. - **RetentionPeriod** *(integer) --* The number of days that automated snapshots are retained in the destination region after they are copied from a source region. - **ManualSnapshotRetentionPeriod** *(integer) --* The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely. The value must be either -1 or an integer between 1 and 3,653. - **SnapshotCopyGrantName** *(string) --* The name of the snapshot copy grant. - **ClusterPublicKey** *(string) --* The public key for the cluster. - **ClusterNodes** *(list) --* The nodes in the cluster. - *(dict) --* The identifier of a node in a cluster. - **NodeRole** *(string) --* Whether the node is a leader node or a compute node. - **PrivateIPAddress** *(string) --* The private IP address of a node within a cluster. - **PublicIPAddress** *(string) --* The public IP address of a node within a cluster. - **ElasticIpStatus** *(dict) --* The status of the elastic IP (EIP) address. - **ElasticIp** *(string) --* The elastic IP (EIP) address for the cluster. - **Status** *(string) --* The status of the elastic IP (EIP) address. - **ClusterRevisionNumber** *(string) --* The specific revision number of the database in the cluster. 
- **Tags** *(list) --* The list of tags for the cluster. - *(dict) --* A tag consisting of a name/value pair for a resource. - **Key** *(string) --* The key, or name, for the resource tag. - **Value** *(string) --* The value for the resource tag. - **KmsKeyId** *(string) --* The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster. - **EnhancedVpcRouting** *(boolean) --* An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide. If this option is ``true`` , enhanced VPC routing is enabled. Default: false - **IamRoles** *(list) --* A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. - *(dict) --* An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services. - **IamRoleArn** *(string) --* The Amazon Resource Name (ARN) of the IAM role, for example, ``arn:aws:iam::123456789012:role/RedshiftCopyUnload`` . - **ApplyStatus** *(string) --* A value that describes the status of the IAM role's association with an Amazon Redshift cluster. The following are possible statuses and descriptions. * ``in-sync`` : The role is available for use by the cluster. * ``adding`` : The role is in the process of being associated with the cluster. * ``removing`` : The role is in the process of being disassociated with the cluster. - **PendingActions** *(list) --* Cluster operations that are waiting to be started. - *(string) --* - **MaintenanceTrackName** *(string) --* The name of the maintenance track for the cluster. 
- **ElasticResizeNumberOfNodeOptions** *(string) --* The number of nodes that you can resize the cluster to with the elastic resize method. - **DeferredMaintenanceWindows** *(list) --* Describes a group of ``DeferredMaintenanceWindow`` objects. - *(dict) --* Describes a deferred maintenance window - **DeferMaintenanceIdentifier** *(string) --* A unique identifier for the maintenance window. - **DeferMaintenanceStartTime** *(datetime) --* A timestamp
tag1 color was not updated tag1 = SongTag.objects.get(pk=self.tag1.id) self.assertNotEqual(tag1.color_hue, 256) def test_post_song_embedded(self): """Test to create a song with nested artists, tags and works.""" # login as manager self.authenticate(self.manager) # pre assert the amount of songs self.assertEqual(Song.objects.count(), 2) self.assertEqual(Artist.objects.count(), 2) self.assertEqual(SongTag.objects.count(), 2) self.assertEqual(Work.objects.count(), 3) # create a new song song = { "title": "Song3", "filename": "song3", "directory": "directory", "duration": 0, "artists": [{"name": self.artist1.name}, {"name": "Artist3"}], "tags": [{"name": "TAG3"}, {"name": self.tag1.name}], "works": [ { "work": { "title": "Work4", "subtitle": "subtitle4", "work_type": {"query_name": self.wt1.query_name}, }, "link_type": "OP", "link_type_number": None, "episodes": "", }, { "work": { "title": self.work1.title, "subtitle": self.work1.subtitle, "work_type": {"query_name": self.work1.work_type.query_name}, }, "link_type": "ED", "link_type_number": 2, "episodes": "1", }, ], } response = self.client.post(self.url, song) # assert the response self.assertEqual(response.status_code, status.HTTP_201_CREATED) # assert the created song song = Song.objects.get(title="Song3") self.assertIsNotNone(song) self.assertEqual(song.filename, "song3") self.assertEqual(song.directory, "directory") self.assertEqual(song.duration, timedelta(0)) # assert the created artists self.assertEqual(Artist.objects.count(), 3) artist3 = Artist.objects.get(name="Artist3") self.assertIsNotNone(artist3) self.assertCountEqual(song.artists.all(), [self.artist1, artist3]) # assert the created tags self.assertEqual(SongTag.objects.count(), 3) tag3 = SongTag.objects.get(name="TAG3") self.assertIsNotNone(tag3) self.assertCountEqual(song.tags.all(), [self.tag1, tag3]) # assert the created works self.assertEqual(Work.objects.count(), 4) work4 = Work.objects.get( title="Work4", subtitle="subtitle4", work_type=self.wt1 
) self.assertIsNotNone(work4) song_work_link_1 = SongWorkLink.objects.get(song=song, work=self.work1) self.assertIsNotNone(song_work_link_1) self.assertEqual(song_work_link_1.link_type, SongWorkLink.ENDING) self.assertEqual(song_work_link_1.link_type_number, 2) self.assertEqual(song_work_link_1.episodes, "1") song_work_link_4 = SongWorkLink.objects.get(song=song, work=work4) self.assertIsNotNone(song_work_link_4) self.assertEqual(song_work_link_4.link_type, SongWorkLink.OPENING) self.assertIsNone(song_work_link_4.link_type_number) self.assertEqual(song_work_link_4.episodes, "") self.assertCountEqual( song.songworklink_set.all(), [song_work_link_1, song_work_link_4] ) def test_post_song_embedded_empty(self): """Test to create a song with empty keys for artists, tags and works.""" # login as manager self.authenticate(self.manager) # pre assert the amount of songs self.assertEqual(Song.objects.count(), 2) self.assertEqual(Artist.objects.count(), 2) self.assertEqual(SongTag.objects.count(), 2) self.assertEqual(Work.objects.count(), 3) # create a new song song = { "title": "Song3", "filename": "song3", "directory": "directory", "duration": 0, "artists": [], "tags": [], "works": [], "detail": "", "lyrics": "", } response = self.client.post(self.url, song) # assert the response self.assertEqual(response.status_code, status.HTTP_201_CREATED) # assert the created song song = Song.objects.get(title="Song3") self.assertIsNotNone(song) self.assertEqual(song.filename, "song3") self.assertEqual(song.directory, "directory") self.assertEqual(song.duration, timedelta(0)) # assert no new artist, tag or work have been created self.assertEqual(Artist.objects.count(), 2) self.assertEqual(SongTag.objects.count(), 2) self.assertEqual(Work.objects.count(), 3) def test_post_song_simple_multi(self): """Test to create two songs without nested artists, tags nor works.""" # login as manager self.authenticate(self.manager) # pre assert the amount of songs self.assertEqual(Song.objects.count(), 
2) # create a new song songs = [ { "title": "Song3", "filename": "song3", "directory": "directory", "duration": 0, "lyrics": "mary had a little lamb", "version": "version 1", "detail": "test", "detail_video": "here", }, { "title": "Song4", "filename": "song4", "directory": "directory", "duration": 0, "lyrics": "", "version": "", "detail": "", "detail_video": "", }, ] response = self.client.post(self.url, songs) # assert the response self.assertEqual(response.status_code, status.HTTP_201_CREATED) # assert the amount of songs self.assertEqual(Song.objects.count(), 4) # assert the created songs Song.objects.get(title="Song3") Song.objects.get(title="Song4") def test_post_song_embedded_work_subtitle(self): """Test work is created even if similar exists with different subtitle.""" # Add a subtitle to work1 self.work1.subtitle = "returns" self.work1.save() # login as manager self.authenticate(self.manager) # pre assert the amount of songs self.assertEqual(Song.objects.count(), 2) self.assertEqual(Artist.objects.count(), 2) self.assertEqual(SongTag.objects.count(), 2) self.assertEqual(Work.objects.count(), 3) # create a new song # The works is same title and worktype as existing work, but without subtitle # This should create a new work song = { "title": "Song3", "filename": "song3", "directory": "directory", "duration": 0, "artists": [], "tags": [], "works": [ { "work": { "title": self.work1.title, "work_type": {"query_name": self.work1.work_type.query_name}, }, "link_type": "ED", "link_type_number": 2, "episodes": "1", } ], } response = self.client.post(self.url, song) # assert the response self.assertEqual(response.status_code, status.HTTP_201_CREATED) # assert the created song song = Song.objects.get(title="Song3") self.assertIsNotNone(song) self.assertEqual(song.filename, "song3") self.assertEqual(song.directory, "directory") self.assertEqual(song.duration, timedelta(0)) # assert a new work was created self.assertEqual(Work.objects.count(), 4) workNew = 
Work.objects.get(title="Work1", subtitle="", work_type=self.wt1) self.assertIsNotNone(workNew) class SongViewTestCase(LibraryAPITestCase): def setUp(self): # create a user without any rights self.user = self.create_user("TestUser") # create a manager self.manager = self.create_user("TestManager", library_level=UserModel.MANAGER) # create test data self.create_test_data() # Create urls to access these playlist entries self.url_song1 = reverse("library-song", kwargs={"pk": self.song1.id}) self.url_song2 = reverse("library-song", kwargs={"pk": self.song2.id}) def test_put_song_simple(self): """Test to update a song without nested artists, tags nor works.""" # login as manager self.authenticate(self.manager) # create a new song song = { "title": "Song1 new", "filename": "song1 new", "directory": "directory new", "duration": timedelta(seconds=1), "lyrics": "mary had a little lamb", "version": "version 1", "detail": "test", "detail_video": "here", "has_instrumental": True, } response = self.client.put(self.url_song1, song) # assert the response self.assertEqual(response.status_code, status.HTTP_200_OK) # assert the created song song = Song.objects.get(pk=self.song1.id) self.assertEqual(song.title, "Song1 new") self.assertEqual(song.filename, "song1 new") self.assertEqual(song.directory, "directory new") self.assertEqual(song.duration, timedelta(seconds=1)) self.assertEqual(song.lyrics, "mary had a little lamb") self.assertEqual(song.version, "version 1") self.assertEqual(song.detail, "test") self.assertEqual(song.detail_video, "here") self.assertTrue(song.has_instrumental) def test_put_song_embedded(self): """Test to update a song with nested artists, tags and works.""" # login as manager self.authenticate(self.manager) # pre assert the amount of songs self.assertEqual(Artist.objects.count(), 2) self.assertEqual(SongTag.objects.count(), 2) self.assertEqual(Work.objects.count(), 3) # create a new song song = { "title": "Song1 new", "filename": "song1 new", "directory": 
"directory new", "duration": timedelta(seconds=1), "artists": [{"name": self.artist1.name}, {"name": "Artist3"}], "tags": [{"name": "TAG3"}, {"name": self.tag1.name}], "works": [ { "work": { "title": "Work4", "subtitle": "subtitle4", "work_type": {"query_name": self.wt1.query_name}, }, "link_type": "OP", "link_type_number": None, "episodes": "", }, { "work": { "title": self.work1.title, "subtitle": self.work1.subtitle, "work_type": {"query_name": self.work1.work_type.query_name}, }, "link_type": "ED", "link_type_number": 2, "episodes": "1", }, ], } response = self.client.put(self.url_song1, song) # assert the response self.assertEqual(response.status_code, status.HTTP_200_OK) # assert the created song song = Song.objects.get(pk=self.song1.pk) self.assertEqual(song.title, "Song1 new") self.assertEqual(song.filename, "song1 new") self.assertEqual(song.directory, "directory new") self.assertEqual(song.duration, timedelta(seconds=1)) # assert the created artists self.assertEqual(Artist.objects.count(), 3) artist3 = Artist.objects.get(name="Artist3") self.assertIsNotNone(artist3) self.assertCountEqual(song.artists.all(), [self.artist1, artist3]) # assert the created tags self.assertEqual(SongTag.objects.count(), 3) tag3 = SongTag.objects.get(name="TAG3") self.assertIsNotNone(tag3) self.assertCountEqual(song.tags.all(), [self.tag1, tag3]) # assert the created works self.assertEqual(Work.objects.count(), 4) work4 = Work.objects.get( title="Work4", subtitle="subtitle4", work_type=self.wt1 ) self.assertIsNotNone(work4) song_work_link_1 = SongWorkLink.objects.get(song=song, work=self.work1) self.assertIsNotNone(song_work_link_1) self.assertEqual(song_work_link_1.link_type, SongWorkLink.ENDING) self.assertEqual(song_work_link_1.link_type_number, 2) self.assertEqual(song_work_link_1.episodes, "1") song_work_link_4 = SongWorkLink.objects.get(song=song, work=work4) self.assertIsNotNone(song_work_link_4) self.assertEqual(song_work_link_4.link_type, SongWorkLink.OPENING) 
self.assertIsNone(song_work_link_4.link_type_number) self.assertEqual(song_work_link_4.episodes, "") self.assertCountEqual( song.songworklink_set.all(), [song_work_link_1, song_work_link_4] ) def test_put_song_embedded_replace(self): """Test to update a song with already defined nested artists, tags and works.""" # login as manager self.authenticate(self.manager) # pre assert the amount of songs self.assertEqual(Artist.objects.count(), 2) self.assertEqual(SongTag.objects.count(), 2) self.assertEqual(Work.objects.count(), 3) # create a new song song = { "title": "Song2 new", "filename": "song2 new", "directory": "directory new", "duration": timedelta(seconds=1), "artists": [{"name": "Artist3"}], "tags": [{"name": "TAG3"}], "works": [ { "work": { "title": "Work4", "subtitle": "subtitle4", "work_type": {"query_name": self.wt1.query_name}, }, "link_type": "OP", "link_type_number": None, "episodes": "", } ], } response = self.client.put(self.url_song2, song) # assert the response self.assertEqual(response.status_code, status.HTTP_200_OK) # assert the created song song = Song.objects.get(pk=self.song2.pk) self.assertEqual(song.title, "Song2 new") self.assertEqual(song.filename, "song2 new") self.assertEqual(song.directory, "directory new") self.assertEqual(song.duration, timedelta(seconds=1)) # assert the created artists self.assertEqual(Artist.objects.count(), 3) artist3 = Artist.objects.get(name="Artist3") self.assertIsNotNone(artist3) self.assertCountEqual(song.artists.all(), [artist3]) # assert the created tags self.assertEqual(SongTag.objects.count(), 3) tag3 = SongTag.objects.get(name="TAG3") self.assertIsNotNone(tag3) self.assertCountEqual(song.tags.all(), [tag3]) # assert the created works self.assertEqual(Work.objects.count(), 4) work4 = Work.objects.get( title="Work4", subtitle="subtitle4", work_type=self.wt1 ) self.assertIsNotNone(work4) song_work_link_4 = SongWorkLink.objects.get(song=song, work=work4) self.assertIsNotNone(song_work_link_4) 
self.assertEqual(song_work_link_4.link_type, SongWorkLink.OPENING) self.assertIsNone(song_work_link_4.link_type_number) self.assertEqual(song_work_link_4.episodes, "") self.assertCountEqual(song.songworklink_set.all(), [song_work_link_4]) def test_put_song_embedded_identical(self): """Test to update a song with same nested artists, tags and works.""" # login as manager self.authenticate(self.manager) # pre assert the amount of songs self.assertEqual(Artist.objects.count(), 2) self.assertEqual(SongTag.objects.count(), 2) self.assertEqual(Work.objects.count(), 3) # create a new song song = { "title": "Song2 new", "filename": "song2 new", "directory": "directory new", "duration": timedelta(seconds=1), "artists": [{"name": self.artist1.name}], "tags": [{"name": self.tag1.name}], "works": [ { "work": { "title": self.work1.title, "subtitle": self.work1.subtitle, "work_type": {"query_name": self.work1.work_type.query_name}, }, "link_type": "OP", "link_type_number": None, "episodes": "", } ], } response = self.client.put(self.url_song2, song) # assert the response self.assertEqual(response.status_code, status.HTTP_200_OK) # assert the created song song = Song.objects.get(pk=self.song2.pk) self.assertEqual(song.title, "Song2 new") self.assertEqual(song.filename, "song2 new") self.assertEqual(song.directory, "directory new") self.assertEqual(song.duration, timedelta(seconds=1)) # assert the created artists self.assertEqual(Artist.objects.count(), 2) self.assertCountEqual(song.artists.all(), [self.artist1]) # assert the created tags self.assertEqual(SongTag.objects.count(), 2) self.assertCountEqual(song.tags.all(), [self.tag1]) # assert the created works self.assertEqual(Work.objects.count(), 3) song_work_link_1 = SongWorkLink.objects.get(song=song, work=self.work1) self.assertIsNotNone(song_work_link_1) self.assertEqual(song_work_link_1.link_type, SongWorkLink.OPENING) self.assertIsNone(song_work_link_1.link_type_number) self.assertEqual(song_work_link_1.episodes, "") 
self.assertCountEqual(song.songworklink_set.all(), [song_work_link_1]) def test_put_song_embedded_work_subtitle(self): """Test work is created even if similar exists with different subtitle.""" # Add a subtitle to work1 self.work1.subtitle = "returns" self.work1.save() # login as manager self.authenticate(self.manager) # pre assert the amount of songs self.assertEqual(Song.objects.count(), 2) self.assertEqual(Artist.objects.count(), 2) self.assertEqual(SongTag.objects.count(), 2) self.assertEqual(Work.objects.count(), 3) # update song1 # The works is same title and worktype as existing work, but without subtitle # This should create a new work song = { "title": "Song1", "filename": "file.mp4", "directory": "directory", "duration": 0, "artists": [], "tags": [],
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks_dev/dist.ipynb (unless otherwise specified). __all__ = ['get_distribution_var_factor_jaccard', 'pointwise_variance', 'estimate_mean_and_variance_from_neighbors_mixture', 'sample_from_neighbors_continuous', 'PointwiseMixture', 'JaccardPointwiseGaussianMixture', 'get_distribution_var_factor_jaccard', 'pointwise_variance', 'estimate_mean_and_variance_from_neighbors_mixture', 'sample_from_neighbors_continuous', 'PointwiseMixture', 'JaccardPointwiseGaussianMixture'] # Cell import numpy as np from scipy import stats from sklearn.base import DensityMixin, BaseEstimator from functools import partial from sklearn.utils import check_array # Cell def get_distribution_var_factor_jaccard(jac_dists, min_var_factor = 1e-2, alpha = 1, func = 'log'): ''' gets the variance factor of the "point distribution" given a jaccard distance from the query point to get the actual variance, use the variance factor alongside the variance of the observed variable in the neighbor points. 
example: get the 30 nearest neighbors, calculat their variance along some axis, then calculate the variance factor of each point and then multiply to get the "point variance in the contribution" for each point functions to test: tangent(x), 1/x, min_var - log(1-x) ''' funcs = ['log', 'inverse_dist', 'constant'] assert min_var_factor > 0 assert alpha > 0 #if sim = 1, var_factor = min_var_factor, if sim -> 0, var_factor -> inf if func == 'log': var_factor = min_var_factor - alpha*np.log(1-jac_dists) elif func == 'inverse_dist': var_factor = min_var_factor/(1-jac_dists)**alpha elif func == 'constant': var_factor = np.ones(jac_dist.shape) return var_factor def pointwise_variance(values, jac_dists, min_var_factor = 1e-2, alpha = 1, variance_mapper = 'log'): ''' gets the pointwise variance for each neighbor of a given point, considering the variance of the neighborhood and the variance factor of each point each point contributes to the estimation of the queried point, but this contribution has a variance associated with how much this point is alike the queried point. 
in this sense pointwise variance is defined, as the variance of the contribution of each point, given the variance of all the points and how similar the points in the neighborhood are to the queried point the variance of the contribution os different from the (sampling)weight of the point, since points with lower similarity will have a higher variance, but will be less likely to be sampled ''' #global variance is calculated with the wieghted full neighbors instead of the sampled values, in order to avoid 0 variance #if a single value is sampled #expand values if contains only one row, so np.cov won't break if values.shape[0] < 2: values = np.concatenate([values,values]) jac_dists = np.concatenate([jac_dists,jac_dists]) var_factor = get_distribution_var_factor_jaccard(jac_dists, min_var_factor, alpha, func = variance_mapper) var = np.cov(values, rowvar = False, aweights=(1- jac_dists.flatten())**alpha) #global variance weights are proportional to point similarities, not distance if var.ndim > 1: broadcasted_var_factor = jac_dists.reshape(jac_dists.shape[0],1)[:, np.newaxis] pointwise_variance = broadcasted_var_factor*var else: pointwise_variance = var_factor*var return pointwise_variance # Cell def estimate_mean_and_variance_from_neighbors_mixture( neighborhood_values, jac_dists, alpha = 1, variance_mapper = 'log', min_var_factor = 1e-2, ): ''' refs https://stats.stackexchange.com/questions/16608/what-is-the-variance-of-the-weighted-mixture-of-two-gaussians https://math.stackexchange.com/questions/195911/calculation-of-the-covariance-of-gaussian-mixtures at the end of the day, impelmented covaraince scaling according to scaling in the diagonal of covariance matrix for multidimensional case ''' weights = ((1-jac_dists)**alpha) weights = weights/weights.sum(axis = 0) #normalize neighborhood_values = check_array(neighborhood_values) if neighborhood_values.shape[-1] == 1: neighborhood_values = neighborhood_values.flatten() mean = np.average(neighborhood_values, weights 
= weights, axis = 0) pw_var = pointwise_variance( values = neighborhood_values, jac_dists = jac_dists, min_var_factor = min_var_factor, alpha = alpha, variance_mapper = variance_mapper ) #handle broadcasting extra_dims_n = pw_var.ndim - neighborhood_values.ndim extra_dims_w = pw_var.ndim - weights.ndim if extra_dims_n > 0: neighborhood_values = neighborhood_values.reshape(*neighborhood_values.shape, *extra_dims_n*[1]) if extra_dims_w > 0: weights = weights.reshape(*weights.shape, *extra_dims_w*[1]) #var = np.sum(weights*pw_var, axis = 0) wvar = np.sum(weights*pw_var, axis = 0) var = ( wvar+ np.sum(weights*(neighborhood_values**2), axis = 0) - mean**2 ) if var.ndim > 1: cov = np.sum(weights*pw_var, axis = 0) factor = var.diagonal()/cov.diagonal() #scale caovariances by variance scaling factor cov = cov*factor np.fill_diagonal(cov, var.diagonal()) var = cov return mean, var def sample_from_neighbors_continuous( neighborhood_values, jac_dists, size = 100, noise_type = 'normal', alpha = 1, scale_variance_pointwise = True, variance_mapper = 'log', min_var_factor = 1e-2, ): ''' samples from neighbor points with noise returns (samples, idxs) ''' #reshape values if neighborhood_values.ndim <= 1: neighborhood_values = neighborhood_values.reshape(-1,1) #handle noise_type valid_noise_types = ['normal', 'multivariate_normal', None] if not noise_type in valid_noise_types: raise ValueError(f'noise_type should be one of {valid_noise_types}, got {noise_type}') else: if noise_type == 'normal': var_preprocess = np.sqrt if not neighborhood_values.shape[-1] == 1: raise ValueError(f'for "normal" noise_type, data should be 1d, got shape {neighborhood_values.shape}') elif noise_type == 'multivariate_normal': var_preprocess = lambda x: x if not neighborhood_values.shape[-1] > 1: raise ValueError(f'for "multivariate_normal" noise_type, data should be N-dimensional for N >1, got shape {neighborhood_values.shape}') #apply alppha and l1-normalize sample weights sample_weights = (1- 
jac_dists)**alpha #transform distance into similarity with 1 - jac_d sample_weights = sample_weights/sample_weights.sum() #sample based on sample weights sampled_idxs = np.random.choice(np.arange(neighborhood_values.shape[0]),size = size, p = sample_weights, replace = True) #sampled_idxs = np.sort(sampled_idxs) if (not noise_type is None) and (len(neighborhood_values) > 1): #adds noise accordingly pw_var = pointwise_variance(neighborhood_values, jac_dists = jac_dists, min_var_factor = min_var_factor, alpha = alpha) counts = np.bincount(sampled_idxs) msk = counts > 0 unique, counts = np.arange(len(counts))[msk], counts[msk] noise_type = getattr(np.random, noise_type) if pw_var.ndim > 1: ndims = pw_var.shape[-1] mean = np.array([0]*ndims) else: mean = 0 samples = [] for i in range(unique.shape[0]): idx = unique[i] var = pw_var[idx] var = var_preprocess(var) noise = noise_type(mean, var, size = counts[i]) sampled_values = neighborhood_values[idx] + noise #samples = np.vstack([samples, sampled_values]) samples.append(sampled_values) if sampled_values.ndim > 1: samples = np.vstack(samples) else: samples = np.vstack([s.reshape(-1,1) for s in samples]) else: #sample without adding noise samples = neighborhood_values[sampled_idxs] return samples, sampled_idxs # Cell class PointwiseMixture(DensityMixin, BaseEstimator): def __init__(self, distribution = 'normal'): self.distribution = distribution return def sample(self, size = 1): ''' draw samples from mixture ''' idxs = np.random.choice(np.arange(len(self.dists_)), size = size, replace = True, p = self.weights_) counts = np.bincount(idxs) msk = counts > 0 unique, counts = np.arange(len(counts))[msk], counts[msk] samples = [] for i in range(len(unique)): idx = unique[i] n_samples = counts[i] sampled_values = self.dists_[idx](size = n_samples) samples.append(sampled_values) if sampled_values.ndim > 1: samples = np.vstack(samples) else: samples = np.vstack([s.reshape(-1,1) for s in samples]) return samples def fit(self, loc, 
std, weights = None):
        # NOTE(review): the `def` line of this method lies above this chunk;
        # from here on it behaves as a fit() taking 1-d `loc`/`std` arrays and
        # optional per-component `weights`.
        '''Fit a 1-d Gaussian mixture from explicit component locations/scales.

        Builds one frozen ``np.random.normal`` sampler per ``(loc[i], std[i])``
        pair and stores normalised component weights; nothing is estimated.
        '''
        # Default: equal weight for every component.
        if weights is None:
            weights = np.ones((len(loc),))
        # All inputs must be flat 1-d arrays, one entry per mixture component.
        if not loc.ndim == 1:
            raise ValueError(f'loc should be 1d, got shape {loc.shape}')
        if not weights.ndim == 1:
            raise ValueError(f'weights should be 1d, got shape {weights.shape}')
        if not std.ndim == 1:
            raise ValueError(f'std should be 1d, got shape {std.shape}')
        ndim = 1
        # Normalise weights into a probability vector for component sampling.
        weights_ = weights/weights.sum()
        # One frozen sampler per component: dists_[i](size=n) draws n values
        # from N(loc[i], std[i]).
        self.dists_ = [partial(np.random.normal, loc = loc[i], scale = std[i]) for i in range(len(loc))]
        self.weights_ = weights_
        self.n_dim_ = ndim
        #self.means_ = X
        #self.covariances_ = pw_variances
        return self


class JaccardPointwiseGaussianMixture(DensityMixin, BaseEstimator):
    '''Mixture model whose per-point variances derive from Jaccard distances.

    Each training point becomes one Gaussian component; component weights are
    ``(1 - jac_dists) ** alpha`` (normalised) and component variances come
    from ``pointwise_variance`` (defined elsewhere in this module).
    '''

    def __init__(
        self,
        pointwise_distribution = 'normal',
        alpha = 1,
        scale_variance_pointwise = True,
        variance_mapper = 'log',
        min_var_factor = 1e-2,
    ):
        # sklearn convention: __init__ only stores hyper-parameters verbatim.
        self.pointwise_distribution = pointwise_distribution
        self.alpha = alpha
        self.scale_variance_pointwise = scale_variance_pointwise
        self.variance_mapper = variance_mapper
        self.min_var_factor = min_var_factor
        return

    def fit(self, X, jac_dists = None, **kwargs):
        ''' fits a mixture estimating pointwise variances from proposed method '''
        X = check_array(
            X,
            force_all_finite = True,
            ensure_2d = True
        )
        # A single feature column is collapsed to a 1-d problem.
        if X.shape[-1] == 1:
            X = X.flatten()
            ndim = 1
        else:
            ndim = X.shape[-1]
        #estimate global mean and cov
        pw_variances = pointwise_variance(
            values = X,
            jac_dists = jac_dists,
            min_var_factor = self.min_var_factor,
            alpha = self.alpha,
            variance_mapper = self.variance_mapper
        )
        # Points at smaller Jaccard distance get larger mixture weight.
        # NOTE(review): jac_dists=None (the default) would fail on the next
        # line; callers appear to be expected to always supply it — confirm.
        weights_ = (1 - jac_dists)**self.alpha
        weights_ = weights_/weights_.sum()
        if ndim == 1:
            pointwise_std = np.sqrt(pw_variances)
            self.dists_ = [partial(np.random.normal, loc = X[i], scale = pointwise_std[i]) for i in range(len(X))]
        else:
            # Multivariate case: one full covariance matrix per point.
            self.dists_ = [partial(np.random.multivariate_normal, mean = X[i], cov = pw_variances[i]) for i in range(len(X))]
        self.weights_ = weights_
        self.n_dim_ = ndim
        #self.means_ = X
        #self.covariances_ = pw_variances
        return self

    def sample(self, size = 1):
        ''' draw samples from mixture '''
        # Choose which component generates each of the `size` draws.
        idxs = np.random.choice(np.arange(len(self.dists_)), size = size, replace = True, p = self.weights_)
        counts = np.bincount(idxs)
        msk = counts > 0
        # Keep only the components that were actually selected.
        unique, counts = np.arange(len(counts))[msk], counts[msk]
        samples = []
        for i in range(len(unique)):
            idx = unique[i]
            n_samples = counts[i]
            # Draw all samples for this component in a single call.
            sampled_values = self.dists_[idx](size = n_samples)
            samples.append(sampled_values)
        # NOTE(review): the stacking mode is decided from the *last* loop value
        # of sampled_values (NameError if size=0); assumes every component
        # shares the same dimensionality — confirm.
        if sampled_values.ndim > 1:
            samples = np.vstack(samples)
        else:
            samples = np.vstack([s.reshape(-1,1) for s in samples])
        return samples

# Cell
import numpy as np
from scipy import stats
from sklearn.base import DensityMixin, BaseEstimator
from functools import partial
from sklearn.utils import check_array

# Cell
def get_distribution_var_factor_jaccard(jac_dists, min_var_factor = 1e-2, alpha = 1, func
/ approximated_vector3_mag approximated_vector3_imag = approximated_vector3_imag / approximated_vector3_mag info.append("PROJECTION 3") info.append("Will project this vector onto basis " + str(current_basis_index) + ": " + "[" + str(new_sample_real2) + ", " + str(new_sample_imag2) + "]") info.append("Projection Value = " + str(projection3)) info.append("Scaled and Rounded Magnitude: " + str(scaled_magnitude3)) info.append("Approximate Vector: [" + str(approximated_vector3_real) + ", " + str(approximated_vector3_imag) + "]") info.append("~") #Take care of the case where there are four positive projections elif (number_of_positive_projections == 4): basis_magnitude_dictionary = {} for p in range(0, number_of_positive_projections): current_basis_index = positive_projections_sorted[p][0] current_basis_vector = basis_vectors[current_basis_index] #The first projection if (p == 0): #Calculate the projection projection1 = np.dot(raw_sample, current_basis_vector) #Scale and round based on preference if (rounding_type == 1): scaled_magnitude1 = np.abs(np.round(projection1 * max_cell_magnitude)) elif (rounding_type == 2): scaled_magnitude1 = np.abs(np.ceil(projection1 * max_cell_magnitude)) elif (rounding_type == 3): scaled_magnitude1 = np.abs(np.floor(projection1 * max_cell_magnitude)) #Threshold if out of bounds if (scaled_magnitude1 > max_cell_magnitude): scaled_magnitude1 = max_cell_magnitude #Catch the first projection p1 = cgh.genSubAp(int(scaled_magnitude1), current_basis_index, max_cell_magnitude, phase_sectors, T3) linear_array_of_cells_p1.append(p1) #Store the projection and its scaled magnitude in the dictionary basis_magnitude_dictionary[current_basis_index] = scaled_magnitude1 #Calculate the approximated basis vector approximated_vector1 = projection1 * current_basis_vector approximated_vector1_real = approximated_vector1[0] approximated_vector1_imag = approximated_vector1[1] #Bring it back inside the unit circle if it is out approximated_vector1_mag = 
np.sqrt(np.square(approximated_vector1_real) + np.square(approximated_vector1_imag)) if (approximated_vector1_mag > 1.0): approximated_vector1_real = approximated_vector1_real / approximated_vector1_mag approximated_vector1_imag = approximated_vector1_imag / approximated_vector1_mag #---------- BEGIN INTRASAMPLE ERROR CORRECTION 1 ----------# delta_real1 = raw_sample_real_part - approximated_vector1_real delta_imag1 = raw_sample_imag_part - approximated_vector1_imag if (delta_real1 >= 0): new_sample_real1 = raw_sample_real_part + np.abs(delta_real1) elif (delta_real1 < 0): new_sample_real1 = raw_sample_real_part - np.abs(delta_real1) else: print("UNEXCPECTED CASE ENCOUNTERED - 1") if (delta_imag1 >= 0): new_sample_imag1 = raw_sample_imag_part + np.abs(delta_imag1) elif (delta_imag1 < 0): new_sample_imag1 = raw_sample_imag_part - np.abs(delta_imag1) else: print("UNEXCPECTED CASE ENCOUNTERED - 1") new_sample1_mag = np.sqrt(np.square(new_sample_real1) + np.square(new_sample_imag1)) if (new_sample1_mag > 1.0): new_sample_real1 = new_sample_real1 / new_sample1_mag new_sample_imag1 = new_sample_imag1 / new_sample1_mag new_sample1 = np.array([new_sample_real1, new_sample_imag1]) #---------- END INTRASAMPLE ERROR CORRECTION 1 ----------# info.append("PROJECTION 1") info.append("Will project this vector onto basis " + str(current_basis_index) + ": " + "[" + str(raw_sample_real_part) + ", " + str(raw_sample_imag_part) + "]") info.append("Projection Value = " + str(projection1)) info.append("Scaled and Rounded Magnitude: " + str(scaled_magnitude1)) info.append("Approximate Vector: [" + str(approximated_vector1_real) + ", " + str(approximated_vector1_imag) + "]") info.append("New Sample: [" + str(new_sample_real1) + ", " + str(new_sample_imag1) + "]") info.append("~") #The second projection if (p == 1): #Calculate the projection projection2 = np.dot(new_sample1, current_basis_vector) #Round based on preference if (rounding_type == 1): scaled_magnitude2 = 
np.abs(np.round(projection2 * max_cell_magnitude)) elif (rounding_type == 2): scaled_magnitude2 = np.abs(np.ceil(projection2 * max_cell_magnitude)) elif (rounding_type == 3): scaled_magnitude2 = np.abs(np.floor(projection2 * max_cell_magnitude)) #Threshold if out of bounds if (scaled_magnitude2 > max_cell_magnitude): scaled_magnitude2 = max_cell_magnitude #Catch the second projection p2 = cgh.genSubAp(int(scaled_magnitude2), current_basis_index, max_cell_magnitude, phase_sectors, T3) linear_array_of_cells_p2.append(p1+p2) #Store the projection and its scaled magnitude in the dictionary basis_magnitude_dictionary[current_basis_index] = scaled_magnitude2 #Calculate the approximated basis vector approximated_vector2 = projection2 * current_basis_vector approximated_vector2_real = approximated_vector2[0] approximated_vector2_imag = approximated_vector2[1] #Bring it back inside the unit circle if it is out approximated_vector2_mag = np.sqrt(np.square(approximated_vector2_real) + np.square(approximated_vector2_imag)) if (approximated_vector2_mag > 1.0): approximated_vector2_real = approximated_vector2_real / approximated_vector2_mag approximated_vector2_imag = approximated_vector2_imag / approximated_vector2_mag #---------- BEGIN INTRASAMPLE ERROR CORRECTION 2 ----------# delta_real2 = new_sample_real1 - approximated_vector2_real delta_imag2 = new_sample_imag1 - approximated_vector2_imag if (delta_real2 >= 0): new_sample_real2 = new_sample_real1 + np.abs(delta_real2) elif (delta_real2 < 0): new_sample_real2 = new_sample_real1 - np.abs(delta_real2) else: print("UNEXCPECTED CASE ENCOUNTERED - 2") if (delta_imag2 >= 0): new_sample_imag2 = new_sample_imag1 + np.abs(delta_imag2) elif (delta_imag2 < 0): new_sample_imag2 = new_sample_imag1 - np.abs(delta_imag2) else: print("UNEXCPECTED CASE ENCOUNTERED - 2") new_sample2_mag = np.sqrt(np.square(new_sample_real2) + np.square(new_sample_imag2)) if (new_sample2_mag > 1.0): new_sample_real2 = new_sample_real2 / new_sample2_mag 
new_sample_imag2 = new_sample_imag2 / new_sample2_mag new_sample2 = np.array([new_sample_real2, new_sample_imag2]) #---------- END INTRASAMPLE ERROR CORRECTION 2 ----------# info.append("PROJECTION 2") info.append("Will project this vector onto basis " + str(current_basis_index) + ": " + "[" + str(new_sample_real1) + ", " + str(new_sample_imag1) + "]") info.append("Projection Value = " + str(projection2)) info.append("Scaled and Rounded Magnitude: " + str(scaled_magnitude2)) info.append("Approximate Vector: [" + str(approximated_vector2_real) + ", " + str(approximated_vector2_imag) + "]") info.append("New Sample: [" + str(new_sample_real2) + ", " + str(new_sample_imag2) + "]") info.append("~") #The third projection if (p == 2): #Calculate the projection projection3 = np.dot(new_sample2, current_basis_vector) #Round based on preference if (rounding_type == 1): scaled_magnitude3 = np.abs(np.round(projection3 * max_cell_magnitude)) elif (rounding_type == 2): scaled_magnitude3 = np.abs(np.ceil(projection3 * max_cell_magnitude)) elif (rounding_type == 3): scaled_magnitude3 = np.abs(np.floor(projection3 * max_cell_magnitude)) #Threshold if out of bounds if (scaled_magnitude3 > max_cell_magnitude): scaled_magnitude3 = max_cell_magnitude #Catch the third projection p3 = cgh.genSubAp(int(scaled_magnitude3), current_basis_index, max_cell_magnitude, phase_sectors, T3) linear_array_of_cells_p3.append(p1+p2+p3) #Store the projection and its scaled magnitude in the dictionary basis_magnitude_dictionary[current_basis_index] = scaled_magnitude3 #Calculate the approximated basis vector approximated_vector3 = projection3 * current_basis_vector approximated_vector3_real = approximated_vector3[0] approximated_vector3_imag = approximated_vector3[1] #Bring it back inside the unit circle if it is out approximated_vector3_mag = np.sqrt(np.square(approximated_vector3_real) + np.square(approximated_vector3_imag)) if (approximated_vector3_mag > 1.0): approximated_vector3_real = 
approximated_vector3_real / approximated_vector3_mag approximated_vector3_imag = approximated_vector3_imag / approximated_vector3_mag #---------- BEGIN INTRASAMPLE ERROR CORRECTION 3 ----------# delta_real3 = new_sample_real2 - approximated_vector3_real delta_imag3 = new_sample_imag2 - approximated_vector3_imag if (delta_real3 >= 0): new_sample_real3 = new_sample_real2 + np.abs(delta_real3) elif (delta_real3 < 0): new_sample_real3 = new_sample_real2 - np.abs(delta_real3) else: print("UNEXCPECTED CASE ENCOUNTERED - 3") if (delta_imag3 >= 0): new_sample_imag3 = new_sample_imag2 + np.abs(delta_imag3) elif (delta_imag3 < 0): new_sample_imag3 = new_sample_imag2 - np.abs(delta_imag3) else: print("UNEXCPECTED CASE ENCOUNTERED - 3") new_sample3_mag = np.sqrt(np.square(new_sample_real3) + np.square(new_sample_imag3)) if (new_sample3_mag > 1.0): new_sample_real3 = new_sample_real3 / new_sample3_mag new_sample_imag3 = new_sample_imag3 / new_sample3_mag new_sample3 = np.array([new_sample_real3, new_sample_imag3]) #---------- END INTRASAMPLE ERROR CORRECTION 3 ----------# info.append("PROJECTION 3") info.append("Will project this vector onto basis " + str(current_basis_index) + ": " + "[" + str(new_sample_real2) + ", " + str(new_sample_imag2) + "]") info.append("Projection Value = " + str(projection3)) info.append("Scaled and Rounded Magnitude: " + str(scaled_magnitude3)) info.append("Approximate Vector: [" + str(approximated_vector3_real) + ", " + str(approximated_vector3_imag) + "]") info.append("New Sample: [" + str(new_sample_real3) + ", " + str(new_sample_imag3) + "]") info.append("~") #The fourth projection if (p == 3): #Calculate the projection projection4 = np.dot(new_sample3, current_basis_vector) #Round based on preference if (rounding_type == 1): scaled_magnitude4 = np.abs(np.round(projection4 * max_cell_magnitude)) elif (rounding_type == 2): scaled_magnitude4 = np.abs(np.ceil(projection4 * max_cell_magnitude)) elif (rounding_type == 3): scaled_magnitude4 = 
np.abs(np.floor(projection4 * max_cell_magnitude)) #Threshold if out of bounds if (scaled_magnitude4 > max_cell_magnitude): scaled_magnitude4 = max_cell_magnitude #Catch the fourth projection p4 = cgh.genSubAp(int(scaled_magnitude4), current_basis_index, max_cell_magnitude, phase_sectors, T3) linear_array_of_cells_p4.append(p1+p2+p3+p4) #Store the projection and its scaled magnitude in the dictionary basis_magnitude_dictionary[current_basis_index] = scaled_magnitude4 #Calculate the approximated basis vector approximated_vector4 = projection4 * current_basis_vector approximated_vector4_real = approximated_vector4[0] approximated_vector4_imag = approximated_vector4[1] approximated_vector4_mag = np.sqrt(np.square(approximated_vector4_real) + np.square(approximated_vector4_imag)) if (approximated_vector4_mag > 1.0): approximated_vector4_real = approximated_vector4_real / approximated_vector4_mag approximated_vector4_imag = approximated_vector4_imag / approximated_vector4_mag info.append("PROJECTION 4") info.append("Will project this vector onto basis " + str(current_basis_index) + ": " + "[" + str(new_sample_real3) + ", " + str(new_sample_imag3) + "]") info.append("Projection Value = " + str(projection4)) info.append("Scaled and Rounded Magnitude: " + str(scaled_magnitude4)) info.append("Approximate Vector: [" + str(approximated_vector4_real) + ", " + str(approximated_vector4_imag) + "]") info.append("~") else: print("UH OH! 
THIS NUMBER OF POSITIVE PROJECTIONS WASN'T ACCOUNTED FOR!") return -1 info.append("Cell: ") if (return_type == 1): info.append(str(p1)) elif (return_type == 2): info.append(p1 + p2) elif (return_type == 3): info.append(p1 + p2 + p3) elif (return_type == 4): info.append(p1 + p2 + p3 + p4) info.append("------------ END SAMPLE ------------") info.append("\n") if (verbose == True): #To ensure that 'useful' cells are being displayed cell_importance = 1 if (np.sum(p1) >= cell_importance): print(*info, sep = '\n') info.clear() #Create the CGH from the cells stacked_CGH_rows_p1 = [] stacked_CGH_rows_p2 = [] stacked_CGH_rows_p3 = [] stacked_CGH_rows_p4 = [] for r in range(0, np.square(N), N): stacked_CGH_rows_p1.append(np.hstack(linear_array_of_cells_p1[r:(r+N)])) stacked_CGH_rows_p2.append(np.hstack(linear_array_of_cells_p2[r:(r+N)])) stacked_CGH_rows_p3.append(np.hstack(linear_array_of_cells_p3[r:(r+N)])) stacked_CGH_rows_p4.append(np.hstack(linear_array_of_cells_p4[r:(r+N)])) CGH_p1 = np.vstack(stacked_CGH_rows_p1) CGH_p2 = np.vstack(stacked_CGH_rows_p2) CGH_p3 = np.vstack(stacked_CGH_rows_p3) CGH_p4 = np.vstack(stacked_CGH_rows_p4) if (return_type == 1): return CGH_p1 elif (return_type == 2): return CGH_p2 elif (return_type == 3): return CGH_p3 elif (return_type == 4): return CGH_p4 else: print("PLEASE SPECIFY A VALID RETURN
are as follows; see the documentation for :mod:`sage.algebras.steenrod.steenrod_algebra` for details on each basis: - 'milnor': Milnor basis. - 'serre-cartan' or 'adem' or 'admissible': Serre-Cartan basis. - 'pst', 'pst_rlex', 'pst_llex', 'pst_deg', 'pst_revz': various `P^s_t`-bases. - 'comm', 'comm_rlex', 'comm_llex', 'comm_deg', 'comm_revz', or any of these with '_long' appended: various commutator bases. The rest of these bases are only defined when `p=2`. - 'wood_y': Wood's Y basis. - 'wood_z': Wood's Z basis. - 'wall' or 'wall_long': Wall's basis. - 'arnon_a' or 'arnon_a_long': Arnon's A basis. - 'arnon_c': Arnon's C basis. EXAMPLES:: sage: from sage.algebras.steenrod.steenrod_algebra_bases import steenrod_algebra_basis sage: steenrod_algebra_basis(7,'milnor') # indirect doctest ((0, 0, 1), (1, 2), (4, 1), (7,)) sage: steenrod_algebra_basis(5) # milnor basis is the default ((2, 1), (5,)) Bases in negative dimensions are empty:: sage: steenrod_algebra_basis(-2, 'wall') () The third (optional) argument to 'steenrod_algebra_basis' is the prime p:: sage: steenrod_algebra_basis(9, 'milnor', p=3) (((1,), (1,)), ((0,), (2,))) sage: steenrod_algebra_basis(9, 'milnor', 3) (((1,), (1,)), ((0,), (2,))) sage: steenrod_algebra_basis(17, 'milnor', 3) (((2,), ()), ((1,), (3,)), ((0,), (0, 1)), ((0,), (4,))) Other bases:: sage: steenrod_algebra_basis(7,'admissible') ((7,), (6, 1), (4, 2, 1), (5, 2)) sage: steenrod_algebra_basis(13,'admissible',p=3) ((1, 3, 0), (0, 3, 1)) sage: steenrod_algebra_basis(5,'wall') (((2, 2), (0, 0)), ((1, 1), (1, 0))) sage: steenrod_algebra_basis(5,'wall_long') (((2, 2), (0, 0)), ((1, 1), (1, 0))) sage: steenrod_algebra_basis(5,'pst-rlex') (((0, 1), (2, 1)), ((1, 1), (0, 2))) """ from .steenrod_algebra_misc import get_basis_name try: if n < 0 or int(n) != n: return () except TypeError: return () generic = kwds.get("generic", False if p==2 else True) basis_name = get_basis_name(basis, p, generic=generic) if basis_name.find('long') >= 0: basis_name = 
basis_name.rsplit('_', 1)[0] profile = kwds.get("profile", None) if (profile is not None and profile != () and profile != ((), ()) and basis != 'milnor' and basis.find('pst') == -1): raise ValueError("Profile functions may only be used with the Milnor or pst bases") ## Milnor basis if basis_name == 'milnor': return milnor_basis(n,p,**kwds) ## Serre-Cartan basis elif basis_name == 'serre-cartan': return serre_cartan_basis(n,p,**kwds) ## Atomic bases, p odd: elif generic and (basis_name.find('pst') >= 0 or basis_name.find('comm') >= 0): return atomic_basis_odd(n, basis_name, p, **kwds) ## Atomic bases, p=2 elif not generic and (basis_name == 'woody' or basis_name == 'woodz' or basis_name == 'wall' or basis_name == 'arnona' or basis_name.find('pst') >= 0 or basis_name.find('comm') >= 0): return atomic_basis(n, basis_name, **kwds) ## Arnon 'C' basis elif not generic and basis == 'arnonc': return arnonC_basis(n) else: raise ValueError("Unknown basis: %s at the prime %s" % (basis, p)) # helper functions for producing bases def restricted_partitions(n, l, no_repeats=False): """ List of 'restricted' partitions of n: partitions with parts taken from list. INPUT: - ``n`` - non-negative integer - ``l`` - list of positive integers - ``no_repeats`` - boolean (optional, default = False), if True, only return partitions with no repeated parts OUTPUT: list of lists One could also use ``Partitions(n, parts_in=l)``, but this function may be faster. Also, while ``Partitions(n, parts_in=l, max_slope=-1)`` should in theory return the partitions of `n` with parts in ``l`` with no repetitions, the ``max_slope=-1`` argument is ignored, so it doesn't work. (At the moment, the ``no_repeats=True`` case is the only one used in the code.) 
    EXAMPLES::

        sage: from sage.algebras.steenrod.steenrod_algebra_bases import restricted_partitions
        sage: restricted_partitions(10, [7,5,1])
        [[7, 1, 1, 1], [5, 5], [5, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        sage: restricted_partitions(10, [6,5,4,3,2,1], no_repeats=True)
        [[6, 4], [6, 3, 1], [5, 4, 1], [5, 3, 2], [4, 3, 2, 1]]
        sage: restricted_partitions(10, [6,4,2])
        [[6, 4], [6, 2, 2], [4, 4, 2], [4, 2, 2, 2], [2, 2, 2, 2, 2]]
        sage: restricted_partitions(10, [6,4,2], no_repeats=True)
        [[6, 4]]

    'l' may have repeated elements. If 'no_repeats' is False, this
    has no effect. If 'no_repeats' is True, and if the repeated
    elements appear consecutively in 'l', then each element may be
    used only as many times as it appears in 'l'::

        sage: restricted_partitions(10, [6,4,2,2], no_repeats=True)
        [[6, 4], [6, 2, 2]]
        sage: restricted_partitions(10, [6,4,2,2,2], no_repeats=True)
        [[6, 4], [6, 2, 2], [4, 2, 2, 2]]

    (If the repeated elements don't appear consecutively, the
    results are likely meaningless, containing several partitions
    more than once, for example.)

    In the following examples, 'no_repeats' is False::

        sage: restricted_partitions(10, [6,4,2])
        [[6, 4], [6, 2, 2], [4, 4, 2], [4, 2, 2, 2], [2, 2, 2, 2, 2]]
        sage: restricted_partitions(10, [6,4,2,2,2])
        [[6, 4], [6, 2, 2], [4, 4, 2], [4, 2, 2, 2], [2, 2, 2, 2, 2]]
        sage: restricted_partitions(10, [6,4,4,4,2,2,2,2,2,2])
        [[6, 4], [6, 2, 2], [4, 4, 2], [4, 2, 2, 2], [2, 2, 2, 2, 2]]
    """
    # Base cases: overshot the target / exact fit reached.
    if n < 0:
        return []
    elif n == 0:
        return [[]]
    else:
        results = []
        # The recursive call restricts the allowed parts to l[index:].
        # With no_repeats, the slice starts *after* the current part so it
        # cannot be reused; otherwise it starts *at* the current part.
        if no_repeats:
            index = 1
        else:
            index = 0
        old_i = 0
        for i in l:
            # Skip duplicates of the immediately preceding part: each distinct
            # leading part is tried once (duplicates in 'l' only extend how
            # often a part may recur via the l[index:] slice).
            if old_i != i:
                for sigma in restricted_partitions(n-i, l[index:], no_repeats):
                    results.append([i] + sigma)
            index += 1
            old_i = i
        return results

def xi_degrees(n,p=2, reverse=True):
    r"""
    Decreasing list of degrees of the xi_i's, starting in degree n.
INPUT: - `n` - integer - `p` - prime number, optional (default 2) - ``reverse`` - bool, optional (default True) OUTPUT: ``list`` - list of integers When `p=2`: decreasing list of the degrees of the `\xi_i`'s with degree at most n. At odd primes: decreasing list of these degrees, each divided by `2(p-1)`. If ``reverse`` is False, then return an increasing list rather than a decreasing one. EXAMPLES:: sage: sage.algebras.steenrod.steenrod_algebra_bases.xi_degrees(17) [15, 7, 3, 1] sage: sage.algebras.steenrod.steenrod_algebra_bases.xi_degrees(17, reverse=False) [1, 3, 7, 15] sage: sage.algebras.steenrod.steenrod_algebra_bases.xi_degrees(17,p=3) [13, 4, 1] sage: sage.algebras.steenrod.steenrod_algebra_bases.xi_degrees(400,p=17) [307, 18, 1] """ from sage.rings.all import Integer if n <= 0: return [] N = Integer(n*(p-1) + 1) l = [(p**d-1)//(p-1) for d in range(1, N.exact_log(p)+1)] if reverse: l.reverse() return l ######################################################## # Functions for defining bases. # These should each return a tuple of tuples of the appropriate form # for the basis. For example, at the prime 2, the Milnor basis # element Sq(a,b,c,...) corresponds to the tuple (a, b, c, ...), while # at odd primes, the element Q_i Q_j ... P(a, b, ...) corresponds to # the pair ((i, j, ...), (a, b, ...)). See each function for more # information. def milnor_basis(n, p=2, **kwds): r""" Milnor basis in dimension `n` with profile function ``profile``. INPUT: - ``n`` - non-negative integer - ``p`` - positive prime number (optional, default 2) - ``profile`` - profile function (optional, default None). Together with ``truncation_type``, specify the profile function to be used; None means the profile function for the entire Steenrod algebra. See :mod:`sage.algebras.steenrod.steenrod_algebra` and :func:`SteenrodAlgebra <sage.algebras.steenrod.steenrod_algebra.SteenrodAlgebra>` for information on profile functions. 
- ``truncation_type`` - truncation type, either 0 or Infinity (optional, default Infinity if no profile function is specified, 0 otherwise) OUTPUT: tuple of mod p Milnor basis elements in dimension n At the prime 2, the Milnor basis consists of symbols of the form `\text{Sq}(m_1, m_2, ..., m_t)`, where each `m_i` is a non-negative integer and if `t>1`, then `m_t \neq 0`. At odd primes, it consists of symbols of the form `Q_{e_1} Q_{e_2} ... P(m_1, m_2, ..., m_t)`, where `0 \leq e_1 < e_2 < ...`, each `m_i` is a non-negative integer, and if `t>1`, then `m_t \neq 0`. EXAMPLES:: sage: from sage.algebras.steenrod.steenrod_algebra_bases import milnor_basis sage: milnor_basis(7) ((0, 0, 1), (1, 2), (4, 1), (7,)) sage: milnor_basis(7, 2) ((0, 0, 1), (1, 2), (4, 1), (7,)) sage: milnor_basis(4, 2) ((1, 1), (4,)) sage: milnor_basis(4, 2, profile=[2,1]) ((1, 1),) sage: milnor_basis(4, 2, profile=(), truncation_type=0) () sage: milnor_basis(4, 2, profile=(), truncation_type=Infinity) ((1, 1), (4,)) sage: milnor_basis(9, 3) (((1,), (1,)), ((0,), (2,))) sage: milnor_basis(17, 3) (((2,), ()), ((1,), (3,)), ((0,), (0, 1)), ((0,), (4,))) sage: milnor_basis(48, p=5) (((), (0, 1)), ((), (6,))) sage: len(milnor_basis(100,3)) 13 sage: len(milnor_basis(200,7)) 0 sage: len(milnor_basis(240,7)) 3 sage: len(milnor_basis(240,7, profile=((),()), truncation_type=Infinity)) 3 sage: len(milnor_basis(240,7, profile=((),()), truncation_type=0)) 0 """ generic = kwds.get('generic', False if p==2 else True) if n == 0: if not generic: return ((),) else: return (((), ()),) from sage.rings.infinity import Infinity from sage.combinat.integer_vector_weighted import WeightedIntegerVectors profile
<reponame>KiDS-WL/Cat_to_Obs_K1000_P1
# ----------------------------------------------------------------
# File Name:    Shear_ratio_wspin_test.py
# Author:       <NAME> (<EMAIL>)
# Description:  short python script to run treecorr to calculate GGL
#               for the shear ratio test
#               for the covariance we use the spin test
#               where the observed ellipticity is randomised
# ----------------------------------------------------------------
import treecorr
import sys
import os
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt

DFLAG ='' # !!! FLAG TO CALC DATA GT FROM MICE OCTANT, NOT FIDUCIAL MICE.

Cov_Method = "Spin" #"Spin" # The method for calculating the gamma_t realisations for use in covariance estimation
# "Spin" means do many spin realisations of the source ellipticities (ie - shape noise only)
# "Patch" means read in the other MICE realisations (1/8 of the sky)
# divide them into patches and calcute the gamma_t from each patch.

# Fail fast if the hard-coded covariance method above is not one of the
# three recognised values.
if Cov_Method != "Spin" and Cov_Method != "Patch" and Cov_Method != "None":
    print("Cov_Method must be set to Spin, Patch or None. Currently it's set to %s. EXITING." %Cov_Method)
    sys.exit()

nPatch = 16 # If Cov_Method is Patch, the MICE octant is split into nPatch RA
            # and nPatch Dec slices (nPatch^2 patches in total). gamma_t is
            # calculated from each patch.

nlens = 5 # !!! IMPORTANT PARAMETER.
          # The number of bins the lenses are divided into.
# Read in user input: nbins, theta_min, theta_max, source tomographic bin,
# number of spin trials, lens bin, number of previously-run trials and the
# parameter file. All eight arguments (sys.argv[1]..sys.argv[8]) are mandatory.
if len(sys.argv) < 9:
    # BUGFIX: the original bound was `len(sys.argv) < 6`, but sys.argv[5..8]
    # are read unconditionally below, so invocations with 6-8 arguments
    # crashed with IndexError instead of printing this usage message.
    #print("Usage: %s nbins theta_min(arcmin) theta_max(arcmin) source_tomobin number_of_spins" % sys.argv[0])
    print("Usage: %s nbins theta_min(arcmin) theta_max(arcmin) source_tomobin ntrials lens_zbin ntrials_prev_run paramfile" % sys.argv[0])
    sys.exit(1)
else:
    nbins = int(sys.argv[1])        # number of angular bins
    theta_min = float(sys.argv[2])  # minimum separation (arcmin)
    theta_max = float(sys.argv[3])  # maximum separation (arcmin)
    JZBIN = int(sys.argv[4])        # source tomographic bin
    ntrials = int(sys.argv[5])      # number of spin realisations
    izin = int(sys.argv[6])         # lens redshift bin to process
    ntprevrun = int(sys.argv[7])    # number of trials already run previously
    paramfile = sys.argv[8]         # input parameter file

# The following lines are used to separately save a finely binned gamma_t
# for use in producing theoretical gamma_t's at the correct (mean-point) theta
# values using kcap.
if nbins > 10:
    thsavetag = '_FineBins%s' %nbins
else:
    thsavetag = ''

# ----- Load Input ----- #
from Get_Input import Get_Input
GI = Get_Input(paramfile)
SOURCE_TYPE = GI.Source_Type()
LENS_TYPE = GI.Lens_Type()
RANDOM_TYPE = GI.Random_Type()

if SOURCE_TYPE == "K1000":
    Blind = GI.Blind()
    CATDIR='/disk09/KIDS/K1000_TWO_PT_STATS/TOMOCATS/'
    # These are the old catalogues binned using DIR-redshift estimates
    #fitscat=CATDIR+'/K1000_N_BLIND_%s_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_goldclasses_5Z_%s.fits' %(Blind,JZBIN)
    # These are the new catalogues binned using the SOM-redshift estimates
    SOMFLAGNAME = GI.SOMFLAGNAME()
    fitscat=CATDIR+'K1000_N_BLIND_%s_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_Flag_SOM_%s_5Z_%s.fits' %(Blind,SOMFLAGNAME,JZBIN)
    # Fits extension and keywords depend on data source:
    iext=1 # File extension
    ra_keyword='ALPHA_J2000'
    dec_keyword='DELTA_J2000'
    w_keyword='weight'
    e1_keyword='e1'
    e2_keyword='e2'
    flip_g1 = False
    flip_g2 = False
    # Extra info needed to identify lenscat
    lens_tag = ''
    # Location of the output files
    OUTDIR='Output/SOURCE-%s_LENS-%s_Blind%s_SOM%s' %(SOURCE_TYPE,LENS_TYPE,Blind,SOMFLAGNAME)

elif SOURCE_TYPE == "MICE2_KV450":
    Mag_OnOff = GI.Mag_OnOff()
    Pz = GI.Pz_TrueEstimated()
    SN = GI.SN()
    MICE_DIR = '/home/bengib/MICE2_Mocks/MICE2_KV450/shear_CATS_Pz%s_SN%s%s' %(Pz,SN,DFLAG)
    fitscat='%s/MICE2_KV450_magnification_%s_small_6Z_%s.fits' %(MICE_DIR,Mag_OnOff,JZBIN)
    iext=1
    ra_keyword='ra_gal'
    dec_keyword='dec_gal'
    w_keyword='recal_weight'
    e1_keyword='gamma1'
    e2_keyword='gamma2'
    # With magnification on, the lensed positions live under different keys.
    if Mag_OnOff == 'on':
        ra_keyword += '_mag'
        dec_keyword += '_mag'
    flip_g1 = True  # Quite bizarrely one must flip g1 with <NAME>'s MICE mocks
    flip_g2 = False # (But not with <NAME>'s MICE catalogues. Simples.)
    # Extra info needed to identify lenscat
    lens_tag = '_mag%s'%Mag_OnOff
    # Location of the output files
    OUTDIR='Output/SOURCE-%s_LENS-%s_Pz%s_SN%s_mag%s' %(SOURCE_TYPE,LENS_TYPE,Pz,SN,Mag_OnOff)

else:
    print("This code only accepts Source_Type set to 'K1000' or 'MICE2_KV450'... ")
    print("Here SOURCE_TYPE is set to ",SOURCE_TYPE," which is not recognised so I'm exiting.")
    sys.exit(1)

# BUGFIX: previously OUTDIR/SPIN was only created when OUTDIR itself was
# missing, so a pre-existing OUTDIR without a SPIN/ subdirectory made later
# writes fail. makedirs(..., exist_ok=True) is idempotent and covers both.
os.makedirs(OUTDIR, exist_ok=True)
os.makedirs(OUTDIR+'/SPIN', exist_ok=True)

# Because we're rotating the observed ellipticities of the sources
# for the spin test we won't use the built
# in treecorr read from the fits catalogue, but read it first and then modify

#open the fits catalogue
fitsfile = fits.open(fitscat)
# read in position and shear
ra = (fitsfile[iext].data[ra_keyword])
dec = (fitsfile[iext].data[dec_keyword])
eobs1 = (fitsfile[iext].data[e1_keyword])
eobs2 = (fitsfile[iext].data[e2_keyword])
weight = (fitsfile[iext].data[w_keyword])
ngals=len(ra)
print("ngals", ngals)

# the unspun source catalogue (and a squared-weight copy used later for the
# weighted pair-count term of the analytical covariance)
sourcecat = treecorr.Catalog(ra=ra,dec=dec,g1=eobs1,g2=eobs2, ra_units='deg', dec_units='deg',w=weight, flip_g1=flip_g1, flip_g2=flip_g2)
sourcecat_wsq = treecorr.Catalog(ra=ra,dec=dec,g1=eobs1,g2=eobs2, ra_units='deg', dec_units='deg',w=weight**2, flip_g1=flip_g1, flip_g2=flip_g2)

# If we're using the MICE octant for covariance, read in the source info here
if Cov_Method == "Patch":
    MICE_DIR_OCTANT = '/home/bengib/MICE2_Mocks/MICE2_KV450/shear_CATS_Pz%s_SN%s_Octant' %(Pz,SN)
# --- MICE2/KV450 galaxy-galaxy lensing (tangential shear) measurement with treecorr ---
# NOTE(review): this fragment starts mid-script. MICE_DIR_OCTANT, Mag_OnOff, JZBIN,
# iext, the *_keyword names, theta_min/theta_max/nbins, izin, nlens, OUTDIR,
# LENS_TYPE/RANDOM_TYPE, lens_tag, DFLAG, thsavetag, Cov_Method, sourcecat and
# sourcecat_wsq are all defined earlier in the file — TODO confirm against full source.
fitsfile_o=fits.open('%s/MICE2_KV450_magnification_%s_small_6Z_%s.fits' %(MICE_DIR_OCTANT,Mag_OnOff,JZBIN))
# read in position and shear for octant sources
ra_s = (fitsfile_o[iext].data[ra_keyword])
dec_s = (fitsfile_o[iext].data[dec_keyword])
eobs1_s = (fitsfile_o[iext].data[e1_keyword])
eobs2_s = (fitsfile_o[iext].data[e2_keyword])
weight_s = (fitsfile_o[iext].data[w_keyword])

# Useful functions if Cov_Method is Patch
# Function returns elements INSIDE (ra,dec) patch
def Select_Patch(Q, ra, dec, rlo, rhi, dlo, dhi):
    # return the elements in Q corresponding to INSIDE the (ra,dec) range
    # (boundaries are exclusive on both sides: rlo < ra < rhi, dlo < dec < dhi)
    idx_ra = np.where(np.logical_and(ra<rhi, ra>rlo))[0]
    idx_dec = np.where(np.logical_and(dec<dhi, dec>dlo))[0]
    idx = np.intersect1d(idx_ra, idx_dec)
    return Q[idx]

# Function returns elements OUTSIDE (ra,dec) patch
def Split_Fields(Q, ra, dec, rlo, rhi, dlo, dhi):
    # return the elements in Q corresponding to OUTSIDE the (ra,dec) range (Jackknife)
    idx_ra = np.append( np.where(ra>rhi)[0], np.where(ra<rlo)[0] )
    idx_dec = np.append( np.where(dec>dhi)[0], np.where(dec<dlo)[0] )
    idx = np.unique( np.append(idx_ra, idx_dec) ) # delete the things that appear more than once
    return Q[idx]

# To minimise I/O source catalogue read across the cluster, we'll loop over all lens bins
# If the WL-72 nodes workers are not available this will be sub-optimal and it would be better
# to queue each source-lens bin pair individually on the cluster
for IZBIN in range (izin,izin+1): #1,2,3,4,5
    lenscatname='LENSCATS/%s%s%s/lens_cat_%sZ_%s.fits' %(LENS_TYPE,lens_tag,DFLAG, nlens,IZBIN)
    rancatname='LENSCATS/%s%s%s/lens_cat_%sZ_%s.fits' %(RANDOM_TYPE,lens_tag,DFLAG, nlens,IZBIN)
    outfile_main='%s/GT_6Z_source_%s_%sZ_lens_%s%s%s.asc' %(OUTDIR,JZBIN, nlens,IZBIN, DFLAG,thsavetag)

    # the lens catalogue we will not modify so we can use the built in treecorr option to read
    # in directly from the catalogue
    lenscat = treecorr.Catalog(lenscatname, ra_col='ALPHA_J2000', dec_col='DELTA_J2000', ra_units='deg', dec_units='deg',
                               w_col='WEICOMP')
    rancat = treecorr.Catalog(rancatname, ra_col='ALPHA_J2000', dec_col='DELTA_J2000', ra_units='deg', dec_units='deg',
                              w_col='WEICOMP')

    # Set-up the different correlations that we want to measure
    bin_slop=0.08 # optimised in Flinc sims
    #bin_slop=0.12 # faster option

    # Number of source lens pairs
    nlns = treecorr.NNCorrelation(min_sep=theta_min, max_sep=theta_max, nbins=nbins, sep_units='arcmin', bin_slop=bin_slop)
    # Number of source random pairs
    nrns = treecorr.NNCorrelation(min_sep=theta_min, max_sep=theta_max, nbins=nbins, sep_units='arcmin', bin_slop=bin_slop)
    # Average shear around lenses
    ls = treecorr.NGCorrelation(min_sep=theta_min, max_sep=theta_max, nbins=nbins, sep_units='arcmin', bin_slop=bin_slop)
    # Average shear around randoms
    rs = treecorr.NGCorrelation(min_sep=theta_min, max_sep=theta_max, nbins=nbins, sep_units='arcmin', bin_slop=bin_slop)

    # ----- Average around lenses with squared weights -----
    # Re-read the lens catalogue by hand so we can square the completeness weights;
    # the squared-weight NG correlation is written out as 'weight_sqrd' below.
    f = fits.open(lenscatname)
    ra_l_wsq = f[1].data['ALPHA_J2000']
    dec_l_wsq = f[1].data['DELTA_J2000']
    w_l_wsq = f[1].data['WEICOMP']
    f.close()
    lenscat_wsq = treecorr.Catalog(ra=ra_l_wsq,dec=dec_l_wsq, ra_units='deg', dec_units='deg', w=w_l_wsq**2 )
    ls_wsq = treecorr.NGCorrelation(min_sep=theta_min, max_sep=theta_max, nbins=nbins, sep_units='arcmin', bin_slop=bin_slop)
    # -------------------------------------------------------

    # Now calculate the different 2pt correlation functions
    print("Calculating the number of source-lens pairs")
    nlns.process(lenscat,sourcecat)
    print("Calculating the number of source-random pairs")
    nrns.process(rancat,sourcecat)
    print("Calculating the average shear around the lenses")
    ls.process(lenscat,sourcecat) # only this one needs to be recalculated for each spin test
    print("Calculating the average shear around the randoms")
    rs.process(rancat,sourcecat)
    print("Calculating the average shear around the lenses WITH SQUARED WEIGHTS")
    ls_wsq.process(lenscat_wsq,sourcecat_wsq)

    # We will use the Mandelbaum 2006 estimator which includes both the random and boost correction.
    # It is given by
    # gt = (SD-SR)/NsNr
    # SD = sum of shear around source-lens pairs
    # SR = sum of shear around source-random pairs
    # NrNs = number of source random pairs
    # Note that we have a different number of randoms from lenses so we also need to make
    # sure that we rescale NrNs accordingly
    # The easiest way to do this in Treecorr is
    # gt = (SD/NlNs)*NlNs/NrNs - SR/NrNs
    # where NsNl = number of source lens pairs
    #
    # SD/NsNl = ls.xi
    # NlNs = nlns.weight/nlns.tot
    # NrNs = nrns.weight/nrns.tot
    # SR/NrNs = rs.xi
    gamma_t = ls.xi*(nlns.weight/nlns.tot)/(nrns.weight/nrns.tot) - rs.xi
    gamma_x = ls.xi_im*(nlns.weight/nlns.tot)/(nrns.weight/nrns.tot) - rs.xi_im

    # This next line produces data used in the computation of the analytical covariance matrix
    #npairs_weighted_simple = (ls.weight)*(ls.weight)/(ls_wsq.weight)

    print("Writing out", outfile_main)
    #Use treecorr to write out the output file and praise-be once more for Jarvis and his well documented code
    treecorr.util.gen_write(outfile_main,
                            ['r_nom','meanr','meanlogr','gamT','gamX','sigma','weight','npairs',
                             'nocor_gamT', 'nocor_gamX',
                             'rangamT','rangamX','ransigma', 'ranweight','weight_sqrd' ],
                            [ ls.rnom,ls.meanr, ls.meanlogr,gamma_t, gamma_x, np.sqrt(ls.varxi), ls.weight, ls.npairs,
                              ls.xi, ls.xi_im,
                              rs.xi, rs.xi_im, np.sqrt(rs.varxi), rs.weight,ls_wsq.weight ])

    if Cov_Method == "Spin":
        # WE ARE CALCULATING THE COVARIANCE BY
        # spinning the source ellipticities many times (shape noise only)
        # Now carry out the spin test with default binslop
        # Average shear around lenses with default fast binslop
        lssp = treecorr.NGCorrelation(min_sep=theta_min, max_sep=theta_max, nbins=nbins, sep_units='arcmin')
        # ----- 19/12/19: GIBLIN's EDIT - CALC RANDOM SIGNAL AS WELL FOR SOURCE-SPINS -----
        rssp = treecorr.NGCorrelation(min_sep=theta_min, max_sep=theta_max, nbins=nbins, sep_units='arcmin')
        # ----- #
        # (chunk truncated here, mid-comment)
        # As we loop through lens bins, we want to have the
            # NOTE(review): chunk starts mid-statement — these path strings are the tail
            # of a "binaries"/"configuration" filter payload whose opening (and the
            # enclosing test method's `def`) lie before this chunk.
                    "/usr/bin/%%", "/usr/sbin/%%", "/bin/%%", "/sbin/%%", "/usr/local/bin/%%", "/usr/local/sbin/%%", "%%/Downloads/%%"
                ],
                "configuration": [
                    "/etc/passwd", "/etc/shadow", "/etc/ld.so.conf", "/etc/ld.so.conf.d/%%", "/etc/pam.d/%%", "/etc/resolv.conf", "/etc/rc%/%%", "/etc/my.cnf", "/etc/hosts", "/etc/hostname", "/etc/fstab", "/etc/crontab", "/etc/cron%/%%", "/etc/init/%%", "/etc/rsyslog.conf"
                ]
            }
        }
        payload['queries'] = {
            "file_events": {
                "interval": 15,
                "status": True,
                "query": "SELECT * FROM file_events;"
            }
        }
        payload['platform'] = 'linux'
        # POST the config and check the server accepts it and echoes the queries/filters back.
        resp = client.post(url_prefix + '/configs/update', headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'success'
        assert response_dict['data']['queries']['file_events']['status'] == True
        assert response_dict['data']['queries']['file_events']['interval'] == 15
        assert response_dict['data']['filters'] == payload['filters']

    def test_edit_configs_list_with_winx86_64_pltform_and_valid_mandatory_payload(self, client, url_prefix, token, default_query, default_filter):
        """
        Test-case platform value is windows, arch values x86_64,
        and type values (shallow/deep/default) and with filters and queries,
        expected output:- status is failure
        """
        payload = {}
        payload['filters'] = {
            "win_include_paths": {"all_files": ["*"]},
            "plgx_event_filters": {
                "win_ssl_events": {
                    "process_name": {
                        "exclude": {
                            "values": [
                                "*\\Program Files\\osquery\\osqueryd\\osqueryd.exe",
                                "*\\Program Files\\osquery\\plgx_win_extension.ext.exe",
                                "*\\Program Files\\osquery\\plgx_cpt.exe"
                            ]
                        }
                    }
                },
                "win_file_events": {
                    "target_path": {
                        "exclude": {
                            "values": [
                                "C:\\Windows\\system32\\DriverStore\\Temp\\*",
                                "C:\\Windows\\system32\\wbem\\Performance*",
                                "C:\\$WINDOWS.~BT\\Sources\\*",
                                "C:\\Windows\\Installer\\*",
                                "*WRITABLE.TST",
                                "C:\\Windows\\System32\\Tasks\\Adobe Acrobat Update Task*",
                                "C:\\Windows\\System32\\Tasks\\Adobe Flash Player Updater*",
                                "C:\\Windows\\System32\\Tasks\\OfficeSoftwareProtectionPlatform\\SvcRestartTask*"
                            ]
                        },
                        "include": {
                            "values": [ "*\\Start Menu*", "*\\Startup\\*", "*\\Content.Outlook\\*", "*\\Downloads\\*", "*.application", "*.appref-ms", "*.bat", "*.chm", "*.cmd", "*.cmdline", "*.docm", "*.exe", "*.jar", "*.jnlp", "*.jse", "*.hta", "*.pptm", "*.ps1", "*.sys", "*.scr", "*.vbe", "*.vbs", "*.xlsm", "*.proj", "*.sln", "C:\\Users\\Default*", "C:\\Windows\\system32\\Drivers*", "C:\\Windows\\SysWOW64\\Drivers*", "C:\\Windows\\system32\\GroupPolicy\\Machine\\Scripts*", "C:\\Windows\\system32\\GroupPolicy\\User\\Scripts*", "C:\\Windows\\system32\\Wbem*", "C:\\Windows\\SysWOW64\\Wbem*", "C:\\Windows\\system32\\WindowsPowerShell*", "C:\\Windows\\SysWOW64\\WindowsPowerShell*", "C:\\Windows\\Tasks\\*", "C:\\Windows\\system32\\Tasks*", "C:\\Windows\\AppPatch\\Custom*", "*VirtualStore*", "*.xls", "*.ppt", "*.rtf" ]
                        }
                    },
                    "process_name": {
                        "exclude": {
                            "values": [ "C:\\Program Files (x86)\\EMET 5.5\\EMET_Service.exe", "C:\\Program Files\\Common Files\\Microsoft Shared\\ClickToRun\\OfficeC2RClient.exe", "C:\\Windows\\system32\\smss.exe", "C:\\Windows\\system32\\CompatTelRunner.exe", "\\\\?\\C:\\Windows\\system32\\wbem\\WMIADAP.EXE", "C:\\Windows\\system32\\wbem\\WMIADAP.EXE", "C:\\Windows\\system32\\mobsync.exe", "C:\\Program Files (x86)\\Dell\\CommandUpdate\\InvColPC.exe", "C:\\Windows\\system32\\igfxCUIService.exe" ]
                        }
                    }
                },
                # NOTE(review): key name looks garbled ("feature_vecsSASators" — presumably
                # "feature_vectors"); kept verbatim because the test posts it as literal data.
                "feature_vecsSASators": {
                    "character_frequencies": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00045, 0.01798, 0, 0.03111, 0.00063, 0.00027, 0, 0.01336, 0.0133, 0.00128, 0.0027, 0.00035, 0.00092, 0.027875, 0.007465, 0.016265, 0.013995, 0.00737, 0.025615, 0.001725, 0.002265, 0.017875, 0.016005, 0.02533, 0.025295, 0.014375, 0.00109, 0.02732, 0.02658, 0.037355, 0.011575, 0.00451, 0.005865, 0.003255, 0.005965, 0.00077, 0.00771, 0.002379, 0.00766, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
                }
            }
        }
        payload['queries'] = {
            "platform_info": {
                "interval": 10,
                "status": True,
                "query": "select * from win_socket_events;"
            }
        }
        payload['platform'] = 'windows'
        resp = client.post(url_prefix + '/configs/update', headers={'x-access-token': token}, json=payload)
        # Request is well-formed (200) but the server reports failure for this payload.
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'failure'

    def test_edit_configs_list_with_winx86_pltform_and_valid_mandatory_payload(self, client, url_prefix, token, default_query, default_filter):
        """
        Test-case platform value is windows, arch values x86,
        and type values (shallow/deep/default) and with filters and queries,
        expected output:- status is success, and response_data with query_name,
        status, interval and filters value
        """
        payload = {}
        payload['filters'] = {}
        payload['queries'] = {
            "drivers": {
                "interval": 10,
                "status": True,
                "query": "select * from drivers;"
            }
        }
        payload['platform'] = 'windows'
        payload['arch'] = 'x86'
        resp = client.post(url_prefix + '/configs/update', headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'success'
        assert response_dict['data']['queries']['drivers']['status'] == True
        assert response_dict['data']['queries']['drivers']['interval'] == 10
        assert response_dict['data']['filters'] == payload['filters']

    def test_edit_configs_list_with_invalid_queries_keys_payload(self, client, url_prefix, token, default_query, default_filter):
        """
        Test-case platform value is darwin and with filters and queries,
        expected output:- status_code is 400
        """
        payload = {}
        # NOTE(review): "file_pasths" is misspelled in the payload — presumably the
        # invalid key this test exercises; kept verbatim.
        payload['filters'] = {
            "file_pasths": {
                "binaries": ["/usr/bin/%%", "/usr/sbin/%%", "/bin/%%", "/sbin/%%", "/usr/local/bin/%%", "/usr/local/sbin/%%", "/opt/bin/%%", "/opt/sbin/%%"],
                "configuration": ["/etc/%%"]
            }
        }
        # 'status' key is missing from the query entry here.
        payload['queries'] = {
            "platform_info": {
                "interval": 15,
                "query": "SELECT * FROM platform_info;"
            }
        }
        payload['platform'] = 'darwin'
        resp = client.post(url_prefix + '/configs/update', headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 400
        response_dict = json.loads(resp.data)

    def test_edit_configs_list_with_all_payload(self, client, url_prefix, token, default_query, default_filter):
        """
        Test-case with all paylods value
        expected output:- status is success, and response_data with query_name,
        status, interval and filters value
        """
        self.payload['filters'] = {
            "file_pasths": {
                "binaries": ["/usr/bin/%%", "/usr/sbin/%%", "/bin/%%", "/sbin/%%", "/usr/local/bin/%%", "/usr/local/sbin/%%", "/opt/bin/%%", "/opt/sbin/%%"],
                "configuration": ["/etc/%%"]
            }
        }
        self.payload['queries'] = {
            "drivers": {
                "interval": 15,
                "status": True,
                "query": "select * from drivers;"
            }
        }
        self.payload['platform'] = 'windows'
        self.payload['arch'] = 'x86'
        self.payload['type'] = 'default'
        resp = client.post(url_prefix + '/configs/update', headers={'x-access-token': token}, json=self.payload)
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'success'
        assert response_dict['data']['queries']['drivers']['status'] == True
        assert response_dict['data']['queries']['drivers']['interval'] == 15
        assert response_dict['data']['filters'] == self.payload['filters']


class TestGetConfigByPlatformOrNode:
    """
    Test-case inside this block where these payloads are used,
    all are optional values and of str type,
    and some values are already present to choose,
    like for platform, we have linux/windows/darwin,
    and for arch we have x86/x86_64,
    so if type of value is not matched or passed any other value
    for platform and arch then it will return 400 i.e., bad request
    """
    # Shared template payload mutated by some tests below (class attribute,
    # so mutations persist across tests within this class).
    payload = {'platform': None, 'arch': None, 'host_identifier': None}

    def test_get_config_without_payload(self, client, url_prefix, token):
        """
        Test-case without payloads and without existing node and config data,
        expected output:- status is failure
        """
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token})
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'failure'

    def test_get_config_with_payload_empty_dict(self, client, url_prefix, token):
        """
        Test-case with payload is empty dictionary and
        without existing node and config data,
        expected output:- status is failure
        """
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json={})
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'failure'

    def test_get_config_with_payload_value_none(self, client, url_prefix, token):
        """
        Test-case with payload value is none and
        without existing node and config data,
        expected output:- status_code is 400
        """
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json=self.payload)
        assert resp.status_code == 400

    def test_get_config_with_only_host_identifier(self, client, url_prefix, token):
        """
        Test-case with only valid/invalid payload value of host_identifier and
        without existing node and config data,
        expected output:- status is failure
        """
        payload = {'host_identifier': 'foobar'}
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'failure'

    def test_get_config_with_only_invalid_platform_value(self, client, url_prefix, token):
        """
        Test-case with only payload value of platform and
        without existing node and config data,
        expected output:- status_code is 400
        """
        payload = {'platform': 'foobar'}
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 400

    def test_get_config_with_only_valid_platform_value(self, client, url_prefix, token):
        """
        Test-case with only payload value of platform and
        without existing node and config data,
        expected output:- status is failure
        """
        payload = {'platform': 'darwin'}
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'failure'

    def test_get_config_with_only_invalid_arch_value(self, client, url_prefix, token):
        """
        Test-case with only invalid payload value of arch and
        without existing node and config data,
        expected output:- status_code is 400
        """
        payload = {'arch': 'foobar'}
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 400

    def test_get_config_with_only_valid_arch_value(self, client, url_prefix, token):
        """
        Test-case with only valid payload value of arch and
        without existing node and config data,
        expected output:- status is failure
        """
        payload = {'arch': 'x86'}
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'failure'

    def test_get_config_with_platform_and_arch_value(self, client, url_prefix, token):
        """
        Test-case with all valid payload value and
        without existing node and config data,
        expected output:- status is failure
        """
        payload = {}
        payload['platform'] = 'windows'
        payload['arch'] = 'x86'
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json=payload)
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'failure'

    def test_get_config_with_all_valid_payload_value(self, client, url_prefix, token):
        """
        Test-case with all valid payload value and
        without existing node and config data,
        expected output:- status is failure
        """
        self.payload['platform'] = 'windows'
        self.payload['arch'] = 'x86'
        self.payload['host_identifier'] = 'foobar'
        resp = client.post(url_prefix + '/configs/view',
                           headers={'x-access-token': token}, json=self.payload)
        assert resp.status_code == 200
        response_dict = json.loads(resp.data)
        assert response_dict['status'] == 'failure'

    def test_get_config_with_invalid_method(self, client, url_prefix, token, node, options, default_filter, default_query):
        """
        Test-case with invalid request method,
        expected output:- status code is 405
        """
        resp = client.get(url_prefix + '/configs/view',
                          headers={'x-access-token': token}, json=self.payload)
        assert resp.status_code == 405

    def test_get_configs_with_only_invalid_host_identifier(self, client, url_prefix, token, node, options, default_filter, default_query):
        """
        Test-case with only invalid payload value of host_identifier and
        without existing node and config data,
        expected output:- status is failure
        """
        payload = {'host_identifier': 'foo'}
        # NOTE(review): chunk is truncated here, mid-call — the rest of this
        # method lies beyond this chunk.
        resp = client.post(url_prefix + '/configs/view',
            # --- ANTLR-generated members of ShapeExpressionParser ---
            # NOTE(review): machine-generated parser code (ANTLR4 Python3 target);
            # the numeric ATN state assignments (self.state = N) and rule indices
            # must not be hand-edited. Chunk starts mid-method: this is the tail
            # of an accept() dispatch belonging to the preceding (unseen) context class.
            else:
                return visitor.visitChildren(self)


    class AtomicLineExpContext(AtomicContext):

        def __init__(self, parser, ctx:ParserRuleContext): # actually a ShapeExpressionParser.AtomicContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def atomic_line(self):
            return self.getTypedRuleContext(ShapeExpressionParser.Atomic_lineContext,0)

        def accept(self, visitor:ParseTreeVisitor):
            # Dispatch to visitAtomicLineExp if the visitor defines it.
            if hasattr( visitor, "visitAtomicLineExp" ):
                return visitor.visitAtomicLineExp(self)
            else:
                return visitor.visitChildren(self)


    class AtomicExponentialExpContext(AtomicContext):

        def __init__(self, parser, ctx:ParserRuleContext): # actually a ShapeExpressionParser.AtomicContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def atomic_exponential(self):
            return self.getTypedRuleContext(ShapeExpressionParser.Atomic_exponentialContext,0)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAtomicExponentialExp" ):
                return visitor.visitAtomicExponentialExp(self)
            else:
                return visitor.visitChildren(self)


    class AtomicConstExpContext(AtomicContext):

        def __init__(self, parser, ctx:ParserRuleContext): # actually a ShapeExpressionParser.AtomicContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def atomic_constant(self):
            return self.getTypedRuleContext(ShapeExpressionParser.Atomic_constantContext,0)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAtomicConstExp" ):
                return visitor.visitAtomicConstExp(self)
            else:
                return visitor.visitChildren(self)


    def atomic(self):
        # Rule 'atomic': select one of five labelled alternatives
        # (constant / line / exponential / sine / sinc) on the lookahead token.
        localctx = ShapeExpressionParser.AtomicContext(self, self._ctx, self.state)
        self.enterRule(localctx, 16, self.RULE_atomic)
        try:
            self.state = 144
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ShapeExpressionParser.CONSTANT]:
                localctx = ShapeExpressionParser.AtomicConstExpContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 139
                self.atomic_constant()
                pass
            elif token in [ShapeExpressionParser.LINE]:
                localctx = ShapeExpressionParser.AtomicLineExpContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 140
                self.atomic_line()
                pass
            elif token in [ShapeExpressionParser.EXPONENTIAL]:
                localctx = ShapeExpressionParser.AtomicExponentialExpContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 141
                self.atomic_exponential()
                pass
            elif token in [ShapeExpressionParser.SINE]:
                localctx = ShapeExpressionParser.AtomicSineExpContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 142
                self.atomic_sine()
                pass
            elif token in [ShapeExpressionParser.SINC]:
                localctx = ShapeExpressionParser.AtomicSincExpContext(self, localctx)
                self.enterOuterAlt(localctx, 5)
                self.state = 143
                self.atomic_sinc()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class Atomic_constantContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def CONSTANT(self):
            return self.getToken(ShapeExpressionParser.CONSTANT, 0)

        def LEFTPAREN(self):
            return self.getToken(ShapeExpressionParser.LEFTPAREN, 0)

        def Identifier(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.Identifier)
            else:
                return self.getToken(ShapeExpressionParser.Identifier, i)

        def COMMA(self):
            return self.getToken(ShapeExpressionParser.COMMA, 0)

        def RIGHTPAREN(self):
            return self.getToken(ShapeExpressionParser.RIGHTPAREN, 0)

        def getRuleIndex(self):
            return ShapeExpressionParser.RULE_atomic_constant

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAtomic_constant" ):
                return visitor.visitAtomic_constant(self)
            else:
                return visitor.visitChildren(self)


    def atomic_constant(self):
        # Rule shape: CONSTANT '(' Identifier ',' Identifier ')'
        localctx = ShapeExpressionParser.Atomic_constantContext(self, self._ctx, self.state)
        self.enterRule(localctx, 18, self.RULE_atomic_constant)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 146
            self.match(ShapeExpressionParser.CONSTANT)
            self.state = 147
            self.match(ShapeExpressionParser.LEFTPAREN)
            self.state = 148
            self.match(ShapeExpressionParser.Identifier)
            self.state = 149
            self.match(ShapeExpressionParser.COMMA)
            self.state = 150
            self.match(ShapeExpressionParser.Identifier)
            self.state = 151
            self.match(ShapeExpressionParser.RIGHTPAREN)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class Atomic_lineContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def LINE(self):
            return self.getToken(ShapeExpressionParser.LINE, 0)

        def LEFTPAREN(self):
            return self.getToken(ShapeExpressionParser.LEFTPAREN, 0)

        def Identifier(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.Identifier)
            else:
                return self.getToken(ShapeExpressionParser.Identifier, i)

        def COMMA(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.COMMA)
            else:
                return self.getToken(ShapeExpressionParser.COMMA, i)

        def RIGHTPAREN(self):
            return self.getToken(ShapeExpressionParser.RIGHTPAREN, 0)

        def getRuleIndex(self):
            return ShapeExpressionParser.RULE_atomic_line

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAtomic_line" ):
                return visitor.visitAtomic_line(self)
            else:
                return visitor.visitChildren(self)


    def atomic_line(self):
        # Rule shape: LINE '(' Identifier ',' Identifier ',' Identifier ')'
        localctx = ShapeExpressionParser.Atomic_lineContext(self, self._ctx, self.state)
        self.enterRule(localctx, 20, self.RULE_atomic_line)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 153
            self.match(ShapeExpressionParser.LINE)
            self.state = 154
            self.match(ShapeExpressionParser.LEFTPAREN)
            self.state = 155
            self.match(ShapeExpressionParser.Identifier)
            self.state = 156
            self.match(ShapeExpressionParser.COMMA)
            self.state = 157
            self.match(ShapeExpressionParser.Identifier)
            self.state = 158
            self.match(ShapeExpressionParser.COMMA)
            self.state = 159
            self.match(ShapeExpressionParser.Identifier)
            self.state = 160
            self.match(ShapeExpressionParser.RIGHTPAREN)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class Atomic_exponentialContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def EXPONENTIAL(self):
            return self.getToken(ShapeExpressionParser.EXPONENTIAL, 0)

        def LEFTPAREN(self):
            return self.getToken(ShapeExpressionParser.LEFTPAREN, 0)

        def Identifier(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.Identifier)
            else:
                return self.getToken(ShapeExpressionParser.Identifier, i)

        def COMMA(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.COMMA)
            else:
                return self.getToken(ShapeExpressionParser.COMMA, i)

        def RIGHTPAREN(self):
            return self.getToken(ShapeExpressionParser.RIGHTPAREN, 0)

        def getRuleIndex(self):
            return ShapeExpressionParser.RULE_atomic_exponential

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAtomic_exponential" ):
                return visitor.visitAtomic_exponential(self)
            else:
                return visitor.visitChildren(self)


    def atomic_exponential(self):
        # Rule shape: EXPONENTIAL '(' Identifier (',' Identifier){3} ')'
        localctx = ShapeExpressionParser.Atomic_exponentialContext(self, self._ctx, self.state)
        self.enterRule(localctx, 22, self.RULE_atomic_exponential)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 162
            self.match(ShapeExpressionParser.EXPONENTIAL)
            self.state = 163
            self.match(ShapeExpressionParser.LEFTPAREN)
            self.state = 164
            self.match(ShapeExpressionParser.Identifier)
            self.state = 165
            self.match(ShapeExpressionParser.COMMA)
            self.state = 166
            self.match(ShapeExpressionParser.Identifier)
            self.state = 167
            self.match(ShapeExpressionParser.COMMA)
            self.state = 168
            self.match(ShapeExpressionParser.Identifier)
            self.state = 169
            self.match(ShapeExpressionParser.COMMA)
            self.state = 170
            self.match(ShapeExpressionParser.Identifier)
            self.state = 171
            self.match(ShapeExpressionParser.RIGHTPAREN)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class Atomic_sineContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def SINE(self):
            return self.getToken(ShapeExpressionParser.SINE, 0)

        def LEFTPAREN(self):
            return self.getToken(ShapeExpressionParser.LEFTPAREN, 0)

        def Identifier(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.Identifier)
            else:
                return self.getToken(ShapeExpressionParser.Identifier, i)

        def COMMA(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.COMMA)
            else:
                return self.getToken(ShapeExpressionParser.COMMA, i)

        def RIGHTPAREN(self):
            return self.getToken(ShapeExpressionParser.RIGHTPAREN, 0)

        def getRuleIndex(self):
            return ShapeExpressionParser.RULE_atomic_sine

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAtomic_sine" ):
                return visitor.visitAtomic_sine(self)
            else:
                return visitor.visitChildren(self)


    def atomic_sine(self):
        # Rule shape: SINE '(' Identifier (',' Identifier){4} ')'
        localctx = ShapeExpressionParser.Atomic_sineContext(self, self._ctx, self.state)
        self.enterRule(localctx, 24, self.RULE_atomic_sine)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 173
            self.match(ShapeExpressionParser.SINE)
            self.state = 174
            self.match(ShapeExpressionParser.LEFTPAREN)
            self.state = 175
            self.match(ShapeExpressionParser.Identifier)
            self.state = 176
            self.match(ShapeExpressionParser.COMMA)
            self.state = 177
            self.match(ShapeExpressionParser.Identifier)
            self.state = 178
            self.match(ShapeExpressionParser.COMMA)
            self.state = 179
            self.match(ShapeExpressionParser.Identifier)
            self.state = 180
            self.match(ShapeExpressionParser.COMMA)
            self.state = 181
            self.match(ShapeExpressionParser.Identifier)
            self.state = 182
            self.match(ShapeExpressionParser.COMMA)
            self.state = 183
            self.match(ShapeExpressionParser.Identifier)
            self.state = 184
            self.match(ShapeExpressionParser.RIGHTPAREN)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class Atomic_sincContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def SINC(self):
            return self.getToken(ShapeExpressionParser.SINC, 0)

        def LEFTPAREN(self):
            return self.getToken(ShapeExpressionParser.LEFTPAREN, 0)

        def Identifier(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.Identifier)
            else:
                return self.getToken(ShapeExpressionParser.Identifier, i)

        def COMMA(self, i:int=None):
            if i is None:
                return self.getTokens(ShapeExpressionParser.COMMA)
            else:
                return self.getToken(ShapeExpressionParser.COMMA, i)

        def RIGHTPAREN(self):
            return self.getToken(ShapeExpressionParser.RIGHTPAREN, 0)

        def getRuleIndex(self):
            return ShapeExpressionParser.RULE_atomic_sinc

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAtomic_sinc" ):
                return visitor.visitAtomic_sinc(self)
            else:
                return visitor.visitChildren(self)


    def atomic_sinc(self):
        # Rule shape: SINC '(' Identifier (',' Identifier){4} ')'
        localctx = ShapeExpressionParser.Atomic_sincContext(self, self._ctx, self.state)
        self.enterRule(localctx, 26, self.RULE_atomic_sinc)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 186
            self.match(ShapeExpressionParser.SINC)
            self.state = 187
            self.match(ShapeExpressionParser.LEFTPAREN)
            self.state = 188
            self.match(ShapeExpressionParser.Identifier)
            self.state = 189
            self.match(ShapeExpressionParser.COMMA)
            self.state = 190
            self.match(ShapeExpressionParser.Identifier)
            self.state = 191
            self.match(ShapeExpressionParser.COMMA)
            self.state = 192
            self.match(ShapeExpressionParser.Identifier)
            self.state = 193
            self.match(ShapeExpressionParser.COMMA)
            self.state = 194
            self.match(ShapeExpressionParser.Identifier)
            self.state = 195
            self.match(ShapeExpressionParser.COMMA)
            self.state = 196
            self.match(ShapeExpressionParser.Identifier)
            self.state = 197
            self.match(ShapeExpressionParser.RIGHTPAREN)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class IntervalContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def IN(self):
            return self.getToken(ShapeExpressionParser.IN, 0)

        def LEFTPAREN(self):
            return self.getToken(ShapeExpressionParser.LEFTPAREN, 0)

        def literal(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(ShapeExpressionParser.LiteralContext)
            else:
                return self.getTypedRuleContext(ShapeExpressionParser.LiteralContext,i)

        def COMMA(self):
            return self.getToken(ShapeExpressionParser.COMMA, 0)

        def RIGHTPAREN(self):
            return self.getToken(ShapeExpressionParser.RIGHTPAREN, 0)

        def getRuleIndex(self):
            return ShapeExpressionParser.RULE_interval

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitInterval" ):
                return visitor.visitInterval(self)
            else:
                return visitor.visitChildren(self)


    def interval(self):
        # Rule shape: IN '(' literal ',' literal ')'
        localctx = ShapeExpressionParser.IntervalContext(self, self._ctx, self.state)
        self.enterRule(localctx, 28, self.RULE_interval)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 199
            self.match(ShapeExpressionParser.IN)
            self.state = 200
            self.match(ShapeExpressionParser.LEFTPAREN)
            self.state = 201
            self.literal()
            self.state = 202
            self.match(ShapeExpressionParser.COMMA)
            self.state = 203
            self.literal()
            self.state = 204
            self.match(ShapeExpressionParser.RIGHTPAREN)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class Discrete_intervalContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def IN(self):
            return self.getToken(ShapeExpressionParser.IN, 0)

        def LEFTPAREN(self):
            return self.getToken(ShapeExpressionParser.LEFTPAREN, 0)

        def literal(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(ShapeExpressionParser.LiteralContext)
            else:
                return self.getTypedRuleContext(ShapeExpressionParser.LiteralContext,i)

        def COMMA(self):
            return self.getToken(ShapeExpressionParser.COMMA, 0)

        def RIGHTPAREN(self):
            return self.getToken(ShapeExpressionParser.RIGHTPAREN, 0)

        def getRuleIndex(self):
            return ShapeExpressionParser.RULE_discrete_interval

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDiscrete_interval" ):
                return visitor.visitDiscrete_interval(self)
            else:
                return visitor.visitChildren(self)


    def discrete_interval(self):
        # Rule shape: IN '(' literal ',' literal ')' (same surface form as 'interval').
        localctx = ShapeExpressionParser.Discrete_intervalContext(self, self._ctx, self.state)
        self.enterRule(localctx, 30, self.RULE_discrete_interval)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 206
            self.match(ShapeExpressionParser.IN)
            self.state = 207
            self.match(ShapeExpressionParser.LEFTPAREN)
            self.state = 208
            self.literal()
            self.state = 209
            self.match(ShapeExpressionParser.COMMA)
            self.state = 210
            self.literal()
            self.state = 211
            self.match(ShapeExpressionParser.RIGHTPAREN)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class LiteralContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def IntegerLiteral(self):
            return self.getToken(ShapeExpressionParser.IntegerLiteral, 0)

        def RealLiteral(self):
            return self.getToken(ShapeExpressionParser.RealLiteral, 0)

        def MINUS(self):
            return self.getToken(ShapeExpressionParser.MINUS, 0)

        def literal(self):
            return self.getTypedRuleContext(ShapeExpressionParser.LiteralContext,0)

        def getRuleIndex(self):
            return ShapeExpressionParser.RULE_literal

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLiteral" ):
                return visitor.visitLiteral(self)
            else:
                return visitor.visitChildren(self)


    def literal(self):
        # Rule 'literal': IntegerLiteral | RealLiteral | MINUS literal (recursive for negatives).
        localctx = ShapeExpressionParser.LiteralContext(self, self._ctx, self.state)
        self.enterRule(localctx, 32, self.RULE_literal)
        try:
            self.state = 217
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ShapeExpressionParser.IntegerLiteral]:
                self.enterOuterAlt(localctx, 1)
                self.state = 213
                self.match(ShapeExpressionParser.IntegerLiteral)
                pass
            elif token in [ShapeExpressionParser.RealLiteral]:
                self.enterOuterAlt(localctx, 2)
                self.state = 214
                self.match(ShapeExpressionParser.RealLiteral)
                pass
            elif token in [ShapeExpressionParser.MINUS]:
                self.enterOuterAlt(localctx, 3)
                self.state = 215
                self.match(ShapeExpressionParser.MINUS)
                self.state = 216
                self.literal()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
        # Lazily build the rule-index -> predicate-handler table, then dispatch.
        if self._predicates == None:
            self._predicates = dict()
            self._predicates[6] = self.expression_sempred
            self._predicates[7] = self.regular_expression_sempred
        pred = self._predicates.get(ruleIndex, None)
        if pred is None:
            raise Exception("No predicate with index:" + str(ruleIndex))
        # NOTE(review): chunk truncated here, mid-method — the else branch body
        # lies beyond this chunk.
        else:
0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1], [1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0], [1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1], [1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0], [1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1], [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 0, 1, 0, 0, 0], [1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1], [0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0], [1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0], [1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0], [1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1], [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1,
from __future__ import absolute_import
from __future__ import print_function
import sys
import abc
import copy
import logging
import re
from abc import abstractmethod
from collections import OrderedDict
from .dna_reshapers import ReshapeDnaString, ReshapeDna
from .mutators import OneHotSequenceMutator, DNAStringSequenceMutator, rc_str
import numpy as np
import kipoi

# Module-level logger; NullHandler keeps the library silent unless the
# application configures logging itself.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())


def is_indel_wrapper(record):
    """Return True if *record* looks like an indel (or is otherwise not a clean SNV).

    ``record`` is a VCF record object exposing ``is_indel``, ``REF`` and
    ``ALT`` (presumably a PyVCF/cyvcf2-style record -- TODO confirm against
    callers).
    """
    if record.is_indel:
        return True
    # Defensive fallbacks: a record with no ALT alleles or an empty REF
    # cannot be treated as a simple substitution.
    if len(record.ALT) == 0 or len(record.REF) == 0:
        return True
    # "." marks a missing reference allele; check both str and bytes forms.
    if record.REF == "." or record.REF == b".":
        return True
    return False


def ensure_tabixed_vcf(input_fn, is_sorted=False, force_tabix=True):
    """Return a path to a bgzipped+tabix-indexed version of ``input_fn``.

    If the input is already tabixed it is returned unchanged; otherwise it is
    bgzipped in place and indexed with pysam. ``is_sorted`` is currently
    unused (see the commented-out alternative below).
    """
    import pybedtools
    import pysam
    pbh = pybedtools.BedTool(input_fn)
    fn = input_fn
    if not pbh._tabixed():
        # pybedtools bug.
        # Work around by bgzipping in place and indexing with pysam directly.
        fn = pbh.bgzip(in_place=True, force=force_tabix)
        pysam.tabix_index(fn, force=force_tabix, preset="vcf")
        # tbxd = pbh.tabix(is_sorted=is_sorted, force=force_tabix)
        # fn = tbxd.fn
    return fn


def prep_str(s: str) -> str:
    """Sanitise *s* into an identifier-like string.

    Runs of characters outside ``[\\w . : whitespace /]`` become a single
    underscore, then any remaining whitespace runs are collapsed to a single
    underscore.
    """
    # https://stackoverflow.com/questions/1007481/how-do-i-replace-whitespaces-with-underscore-and-vice-versa
    # Remove all non-word characters (everything except numbers and letters)
    # s = re.sub(r"[^\w\s]", '', s)
    s = re.sub(r"[^\w\.\:\s/]+", '_', s)
    #
    # Replace all runs of whitespace with a single underscore
    s = re.sub(r"\s+", '_', s)
    #
    return s


def select_from_dl_batch(obj, rows, nrows_expected=None):
    # Select ``rows`` from every array in a dataloader batch, which may be a
    # dict, list or bare array (handling of those cases continues below this
    # chunk).
    def subset(in_obj):
        # Guard: every array in the batch must share the expected batch size.
        if nrows_expected is not None:
            if not in_obj.shape[0] == nrows_expected:
                raise Exception("Error selecting: Expected the first dimension to have %d rows!" % nrows_expected)
        return in_obj[rows, ...]
if isinstance(obj, dict): out_obj = {} if isinstance(obj, OrderedDict): out_obj = OrderedDict() for k in obj: out_obj[k] = subset(obj[k]) elif isinstance(obj, list): out_obj = [subset(el) for el in obj] else: out_obj = subset(obj) return out_obj def write_hdf5(fname, data): """Generic hdf5 bulk writer """ if isinstance(data, list): data = {"_list_{i}".format(i=i): v for i, v in enumerate(data)} import deepdish deepdish.io.save(fname, data) # Alternative # def recursive_h5_mutmap_writer(objs, handle, path=""): # import six # for key in objs.keys(): # if isinstance(objs[key], dict): # g = handle.create_group(key) # recursive_h5_mutmap_writer(objs[key], g, path=path + "/" + key) # else: # if isinstance(objs[key], list) or isinstance(objs[key], np.ndarray): # el = np.array(objs[key]) # if "U" in el.dtype.str: # el = el.astype("S") # handle.create_dataset(name=path + "/" + key, data=el, chunks=True, compression='gzip') # else: # el = objs[key] # if isinstance(el, six.string_types): # el = str(el) # handle.create_dataset(name=path + "/" + key, data=el) def read_hdf5(fname): """Generic hdf5 bulk reader """ import deepdish data = deepdish.io.load(fname) if isinstance(data, dict) and list(data.keys())[0].startswith("_list_"): data = [data["_list_{i}".format(i=i)] for i in range(len(data))] return data # def recursive_h5_mutmap_reader(handle): # import h5py # objs = {} # for key in handle.keys(): # if isinstance(handle[key], h5py.Group): # objs[key] = recursive_h5_mutmap_reader(handle[key]) # else: # if isinstance(handle[key], h5py.Dataset): # el = handle[key].value # if isinstance(el, np.ndarray): # if "S" in el.dtype.str: # el = el.astype(str) # objs[key] = el # return objs def _get_seq_len(input_data): if isinstance(input_data, (list, tuple)): return input_data[0].shape elif isinstance(input_data, dict): for k in input_data: return input_data[k].shape elif isinstance(input_data, np.ndarray): return input_data.shape else: raise ValueError("Input can only be of type: list, 
dict or np.ndarray") def concat_columns(df, sep="|"): """Concatenate all columns of a dataframe into a pd.Series """ for i in range(df.shape[1]): vec = df.iloc[:, i].astype(str) if i == 0: column = vec else: column = column.str.cat(vec, sep=sep) return column # TODO: generalise so that also FORMAT, FILTER and sample identifiers are supported... def convert_record(input_record, pyvcf_reader): """ Convert a cyvcf2 record into a pyvcf record. The source files should at least be similar in terms of INFO tags. FILTER and FORMAT tags might not be handeled correctly at the moment! """ import vcf def revert_to_info(info_obj): out_str_elms = [] for el in list(info_obj): out_str_elms.append(u"{0}={1}".format(*el)) if len(out_str_elms) > 0: if sys.version_info[0] < 3: return pyvcf_reader._parse_info(u";".join(out_str_elms).encode("ascii", "ignore")) else: return pyvcf_reader._parse_info(u";".join(out_str_elms)) else: return {} # info_tag = revert_to_info(input_record.INFO) alt = pyvcf_reader._map(pyvcf_reader._parse_alt, input_record.ALT) return vcf.model._Record(input_record.CHROM, input_record.POS, input_record.ID, input_record.REF, alt, input_record.QUAL, input_record.FILTER, info_tag, input_record.FORMAT, {}) def default_vcf_id_gen(vcf_record, id_delim=":"): # make sure that also in python2 the variant output is like in python3 alt_ids = str([str(alt) for alt in vcf_record.ALT]) return str(vcf_record.CHROM) + id_delim + str(vcf_record.POS) + id_delim + str(vcf_record.REF) + id_delim + alt_ids class RegionGenerator(object): __metaclass__ = abc.ABCMeta def __init__(self, model_info_extractor, seq_length=None): self.seq_length = None self.centered_l_offset = None self.centered_r_offset = None self.model_info_extractor = model_info_extractor @abstractmethod def __call__(self, variant): """single variant instance yielded by vcf_iter """ pass class SnvCenteredRg(RegionGenerator): def __init__(self, model_info_extractor, seq_length=None): """ Arguments: model_info_extractor: 
ModelInfoExtractor object. seq_length: Not required parameter: Sequence length in case model has variable sequence length input """ super(SnvCenteredRg, self).__init__(model_info_extractor) if seq_length is not None: self.seq_length = seq_length else: self.seq_length = model_info_extractor.get_seq_len() if self.seq_length is None: raise Exception("Model input sequence length is not defined. Please set it manually using `seq_length`") seq_length_half = int(self.seq_length / 2) self.centered_l_offset = seq_length_half - 1 self.centered_r_offset = seq_length_half + self.seq_length % 2 # self.centered_l_offset = seq_length_half # self.centered_r_offset = seq_length_half + self.seq_length % 2 -1 def __call__(self, variant_record): """single variant instance yielded by vcf_iter """ return {"chrom": [variant_record.CHROM], "start": [variant_record.POS - self.centered_l_offset], "end": [variant_record.POS + self.centered_r_offset], } class BedOverlappingRg(RegionGenerator): def __init__(self, model_info_extractor, seq_length=None): super(BedOverlappingRg, self).__init__(model_info_extractor) if seq_length is not None: self.seq_length = seq_length else: self.seq_length = model_info_extractor.get_seq_len() if self.seq_length is None: raise Exception("Model input sequence length is not defined. Please set it manually using `seq_length`") def __call__(self, bed_entry): """Generate regions based on a bed file entry. outputs consecutive regions of model sequence length starting from bed_entry.start and reaching at least until bed_entry.end. Output regions are non-overlapping hence the covered output regions may cover more genetic space than specified in bed_entry. 
(Overhanging tail) """ chroms = [] starts = [] ends = [] ids = [] region_len = bed_entry.end - bed_entry.start num_intervals = region_len // self.seq_length + int((region_len % self.seq_length) != 0) for i in range(num_intervals): chroms.append(bed_entry.chrom) starts.append(bed_entry.start + (i * self.seq_length)) ends.append(bed_entry.start + ((i + 1) * self.seq_length)) ids.append(bed_entry.name + ".%d" % i) return {"chrom": chroms, "start": starts, "end": ends, "id": ids} class SnvPosRestrictedRg(RegionGenerator): def __init__(self, model_info_extractor, pybed_def, seq_length=None): super(SnvPosRestrictedRg, self).__init__(model_info_extractor) self.tabixed = pybed_def.tabix(in_place=False) if seq_length is not None: self.seq_length = seq_length else: self.seq_length = model_info_extractor.get_seq_len() if self.seq_length is None: raise Exception("Model input sequence length is not defined. Please set it manually using `seq_length`") seq_length_half = int(self.seq_length / 2) self.centered_l_offset = seq_length_half - 1 self.centered_r_offset = seq_length_half + self.seq_length % 2 def __call__(self, variant_record): """single variant instance yielded by vcf_iter """ overlap = self.tabixed.tabix_intervals( "%s:%d-%d" % (variant_record.CHROM, variant_record.POS, variant_record.POS + 1)) chroms = [] starts = [] ends = [] for interval in overlap: i_s = interval.start + 1 i_e = interval.end if len(interval) < self.seq_length: continue if len(interval) != self.seq_length: centered_se = np.array( [(variant_record.POS - self.centered_l_offset), (variant_record.POS + self.centered_r_offset)]) start_missing = centered_se[0] - i_s # >=0 if ok end_missing = i_e - centered_se[1] # >=0 if ok if start_missing < 0: centered_se -= start_missing # shift right elif end_missing < 0: centered_se += end_missing # shift left assert centered_se[1] - centered_se[0] + 1 == self.seq_length assert (i_s <= centered_se[0]) and (i_e >= centered_se[1]) i_s, i_e = centered_se.tolist() 
chroms.append(variant_record.CHROM) starts.append(i_s) ends.append(i_e) return {"chrom": chroms, "start": starts, "end": ends} class ModelInfoExtractor(object): def __init__(self, model_obj, dataloader_obj): self.model = model_obj self.dataloader = dataloader_obj self.seq_fields = _get_seq_fields(model_obj) # Here we really have to go and collect all the possible different input DNA sequences and prepare the correct # transformation to standard # Collect the different sequence inputs and the corresponfing ranges objects: self.seq_input_metadata = {} self.seq_input_mutator = {} self.seq_to_str_converter = {} self.seq_input_array_trafo = {} for seq_field in self.seq_fields: special_type = _get_specialtype(dataloader_obj, seq_field) if special_type is None: logger.warn("special_type of sequence field '%s' is not set," "assuming 1-hot encoded DNA sequence." % str(seq_field)) if (special_type is None) or (special_type == kipoi.components.ArraySpecialType.DNASeq): dna_seq_trafo = ReshapeDna(_get_seq_shape_model(model_obj, seq_field)) self.seq_input_mutator[seq_field] = OneHotSequenceMutator(dna_seq_trafo) self.seq_to_str_converter[seq_field] = OneHotSeqExtractor(dna_seq_trafo) self.seq_input_array_trafo[seq_field] = dna_seq_trafo if special_type == kipoi.components.ArraySpecialType.DNAStringSeq: dna_seq_trafo = ReshapeDnaString(_get_seq_shape_model(model_obj, seq_field)) self.seq_input_mutator[seq_field] = DNAStringSequenceMutator(dna_seq_trafo) self.seq_to_str_converter[seq_field] = StrSeqExtractor(dna_seq_trafo) self.seq_input_array_trafo[seq_field] = dna_seq_trafo self.seq_input_metadata[seq_field] = _get_metadata_name(dataloader_obj, seq_field) # If then where do I have to put my bed file in the command? self.exec_files_bed_keys = _get_dl_bed_fields(dataloader_obj) self.requires_region_definition = False # If there is a field for putting the a postprocessing bed file, then generate the bed file. 
if (self.exec_files_bed_keys is not None) and (len(self.exec_files_bed_keys) != 0): self.requires_region_definition = True self.seq_length = None # None means either not requires_region_definition or undefined sequence length if self.requires_region_definition: # seems to require a bed file definition, so try to assign a sequence length: seq_lens = [self.seq_input_array_trafo[seq_field].get_seq_len() for seq_field in self.seq_input_array_trafo] seq_len = list(set([el for el in seq_lens])) seq_len_noNone = [el for el in seq_len if el is not None] if len(seq_len) == 0: raise Exception("dataloader.yaml defines postprocessing > args > bed_input, but in
from datetime import date from models import gtfs, config, util, nextbus, routeconfig import argparse import shapely import partridge as ptg import numpy as np from pathlib import Path import requests import json import boto3 import gzip import hashlib import math import zipfile # Downloads and parses the GTFS specification # and saves the configuration for all routes to S3. # The S3 object contains data merged from GTFS and the Nextbus API (for agencies using Nextbus). # The frontend can then request this S3 URL directly without hitting the Python backend. # For each direction, the JSON object contains a coords array defining the shape of the route, # where the values are objects containing lat/lon properties: # # "coords":[ # {"lat":37.80707,"lon":-122.41727} # {"lat":37.80727,"lon":-122.41562}, # {"lat":37.80748,"lon":-122.41398}, # {"lat":37.80768,"lon":-122.41234}, # ... # ] # # For each direction, the JSON object also contains a stop_geometry object where the keys are stop IDs # and the values are objects with a distance property (cumulative distance in meters to that stop along the GTFS # shape), # and an after_index property (index into the coords array of the last coordinate before that stop). # # "stop_geometry":{ # "5184":{"distance":8,"after_index":0}, # "3092":{"distance":279,"after_index":1}, # "3095":{"distance":573,"after_index":3}, # "4502":{"distance":1045,"after_index":8}, # ... #} # # In order to match a Nextbus direction with a GTFS shape_id, this finds the GTFS shape_id for that route where # distance(first coordinate of shape, first stop location) + distance(last coordinate of shape, last stop location) # is a minimum. # # Currently the script just overwrites the one S3 path, but this process could be extended in the future to # store different paths for different dates, to allow fetching historical data for route configurations. 
#

def match_nextbus_direction(nextbus_route_config, geometry):
    """Find the Nextbus direction whose terminal stops best match a GTFS shape.

    For each Nextbus direction, sums the haversine distance from the
    direction's first stop to the shape's first coordinate and from its last
    stop to the shape's last coordinate; the direction with the smallest sum
    wins. Returns ``(best_dir_info, best_terminal_dist)``.
    Note: shape coordinates are (lon, lat), hence the swapped indexing below.
    """
    shape_start = geometry.coords[0]
    shape_end = geometry.coords[-1]
    nextbus_dir_infos = nextbus_route_config.get_direction_infos()
    terminal_dists = []
    for nextbus_dir_info in nextbus_dir_infos:
        nextbus_dir_stop_ids = nextbus_dir_info.get_stop_ids()
        first_stop_info = nextbus_route_config.get_stop_info(nextbus_dir_stop_ids[0])
        last_stop_info = nextbus_route_config.get_stop_info(nextbus_dir_stop_ids[-1])
        # Determine distance between first nextbus stop and start of GTFS shape,
        # plus distance between last stop and end of GTFS shape,
        # for all Nextbus directions for this route.
        start_dist = util.haver_distance(first_stop_info.lat, first_stop_info.lon, shape_start[1], shape_start[0])
        end_dist = util.haver_distance(last_stop_info.lat, last_stop_info.lon, shape_end[1], shape_end[0])
        terminal_dist = start_dist + end_dist
        terminal_dists.append(terminal_dist)
    terminal_dist_order = np.argsort(terminal_dists)
    best_nextbus_dir_index = terminal_dist_order[0]  # index of the "best" shape for this direction, with the minimum terminal_dist
    best_nextbus_dir_info = nextbus_dir_infos[best_nextbus_dir_index]
    best_terminal_dist = terminal_dists[best_nextbus_dir_index]
    return best_nextbus_dir_info, best_terminal_dist


def get_stop_geometry(stop_xy, shape_lines_xy, shape_cumulative_dist, start_index):
    # Finds the first position of a particular stop along a shape
    # (after the start_index'th line segment in shape_lines_xy),
    # using XY coordinates in meters.
    # The returned dict is used by the frontend to draw line segments along a route between two stops.
    num_shape_lines = len(shape_lines_xy)
    # Track the segment closest to the stop seen so far.
    best_offset = 99999999
    best_index = 0
    shape_index = start_index
    while shape_index < num_shape_lines:
        shape_line_offset = shape_lines_xy[shape_index].distance(stop_xy)
        if shape_line_offset < best_offset:
            best_offset = shape_line_offset
            best_index = shape_index
        # Early exit: once we are within 50m and the distance starts growing
        # again, we have passed the stop -- take the first local minimum.
        if best_offset < 50 and shape_line_offset > best_offset:
            break
        shape_index += 1
    # Distance along the route = cumulative distance to the best segment's
    # start plus the straight-line remainder from there to the stop.
    shape_point = shapely.geometry.Point(shape_lines_xy[best_index].coords[0])
    distance_after_shape_point = stop_xy.distance(shape_point)
    distance_to_shape_point = shape_cumulative_dist[best_index]
    stop_dist = distance_to_shape_point + distance_after_shape_point
    # Log stops that sit suspiciously far (>30m) from the shape.
    if best_offset > 30:
        print(f' stop_dist = {int(stop_dist)} = ({int(distance_to_shape_point)} + {int(distance_after_shape_point)}), offset = {int(best_offset)}, after_index = {best_index} ')
    return {
        'distance': int(stop_dist),  # total distance in meters along the route shape to this stop
        'after_index': best_index,  # the index of the coordinate of the shape just before this stop
        'offset': int(best_offset)  # distance in meters between this stop and the closest line segment of shape
    }


def get_unique_shapes(direction_trips_df, stop_times_df, stops_map, normalize_gtfs_stop_id):
    # Finds the unique shapes associated with a GTFS route/direction,
    # merging shapes that contain common subsequences of stops.
    # These unique shapes may represent multiple branches of a route.
    # Returns a list of dicts with properties 'shape_id', 'count', and 'stop_ids',
    # sorted by count in descending order.
    stop_times_trip_id_values = stop_times_df['trip_id'].values
    direction_shape_id_values = direction_trips_df['shape_id'].values

    unique_shapes_map = {}

    # Process shape_ids most-frequent first so the dominant variant of a
    # branch becomes the canonical entry that others merge into.
    direction_shape_ids, direction_shape_id_counts = np.unique(direction_shape_id_values, return_counts=True)
    direction_shape_id_order = np.argsort(-1 * direction_shape_id_counts)

    direction_shape_ids = direction_shape_ids[direction_shape_id_order]
    direction_shape_id_counts = direction_shape_id_counts[direction_shape_id_order]

    for shape_id, shape_id_count in zip(direction_shape_ids, direction_shape_id_counts):
        # One representative trip per shape_id is enough to recover the stop sequence.
        shape_trip = direction_trips_df[direction_shape_id_values == shape_id].iloc[0]
        shape_trip_id = shape_trip.trip_id
        shape_trip_stop_times = stop_times_df[stop_times_trip_id_values == shape_trip_id].sort_values('stop_sequence')

        shape_trip_stop_ids = [
            normalize_gtfs_stop_id(gtfs_stop_id)
            for gtfs_stop_id in shape_trip_stop_times['stop_id'].values
        ]

        # Key shapes by a short digest of their (normalized) stop sequence so
        # shapes with identical stop sequences collapse to one entry.
        unique_shape_key = hashlib.sha256(json.dumps(shape_trip_stop_ids).encode('utf-8')).hexdigest()[0:12]

        #print(f' shape {shape_id} ({shape_id_count})')

        if unique_shape_key not in unique_shapes_map:
            for other_shape_key, other_shape_info in unique_shapes_map.items():
                #print(f" checking match with {shape_id} and {other_shape_info['shape_id']}")
                if is_subsequence(shape_trip_stop_ids, other_shape_info['stop_ids']):
                    print(f" shape {shape_id} is subsequence of shape {other_shape_info['shape_id']}")
                    unique_shape_key = other_shape_key
                    break
                elif is_subsequence(other_shape_info['stop_ids'], shape_trip_stop_ids):
                    print(f" shape {other_shape_info['shape_id']} is subsequence of shape {shape_id}")
                    shape_id_count += other_shape_info['count']
                    # Safe despite iterating .items(): we break immediately,
                    # so the mutated dict is never advanced again.
                    del unique_shapes_map[other_shape_key]
                    break

        if unique_shape_key not in unique_shapes_map:
            unique_shapes_map[unique_shape_key] = {
                'count': 0,
                'shape_id': shape_id,
                'stop_ids': shape_trip_stop_ids
            }

        unique_shapes_map[unique_shape_key]['count'] += shape_id_count

    sorted_shapes = sorted(unique_shapes_map.values(), key=lambda shape: -1 * shape['count'])

    for shape_info in sorted_shapes:
        count = shape_info['count']
        shape_id = shape_info['shape_id']
        stop_ids = shape_info['stop_ids']
        first_stop_id = stop_ids[0]
        last_stop_id = stop_ids[-1]
        first_stop = stops_map[first_stop_id]
        last_stop = stops_map[last_stop_id]
        print(f' shape_id: {shape_id} ({count}x) stops:{len(stop_ids)} from {first_stop_id} {first_stop.stop_name} to {last_stop_id} {last_stop.stop_name} {",".join(stop_ids)}')

    return sorted_shapes


def download_gtfs_data(agency: config.Agency, gtfs_cache_dir):
    """Download and extract the agency's GTFS zip into ``gtfs_cache_dir``.

    Skips the download entirely when the cache directory already exists.
    Raises if the agency has no ``gtfs_url`` or the HTTP fetch fails.
    """
    gtfs_url = agency.gtfs_url
    if gtfs_url is None:
        raise Exception(f'agency {agency.id} does not have gtfs_url in config')

    cache_dir = Path(gtfs_cache_dir)
    if not cache_dir.exists():
        print(f'downloading gtfs data from {gtfs_url}')
        r = requests.get(gtfs_url)
        if r.status_code != 200:
            raise Exception(f"Error fetching {gtfs_url}: HTTP {r.status_code}: {r.text}")

        zip_path = f'{util.get_data_dir()}/gtfs-{agency.id}.zip'

        with open(zip_path, 'wb') as f:
            f.write(r.content)

        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(gtfs_cache_dir)


def is_subsequence(smaller, bigger):
    """Return True if list ``smaller`` appears as a contiguous run in ``bigger``.

    Only the first occurrence of ``smaller[0]`` in ``bigger`` is considered
    as the candidate start position.
    """
    smaller_len = len(smaller)
    bigger_len = len(bigger)
    if smaller_len > bigger_len:
        return False
    try:
        start_pos = bigger.index(smaller[0])
    except ValueError:
        return False
    end_pos = start_pos+smaller_len
    if end_pos > bigger_len:
        return False
    return smaller == bigger[start_pos:end_pos]


def save_routes_for_agency(agency: config.Agency, save_to_s3=True):
    # NOTE: this function continues beyond this chunk.
    agency_id = agency.id
    gtfs_cache_dir = f'{util.get_data_dir()}/gtfs-{agency_id}'
    download_gtfs_data(agency, gtfs_cache_dir)

    feed = ptg.load_geo_feed(gtfs_cache_dir, {})

    print(f"Loading {agency_id} routes...")
    routes_df = feed.routes
    if agency.gtfs_agency_id is not None:
        routes_df = routes_df[routes_df.agency_id == agency.gtfs_agency_id]

    routes_data = []

    print(f"Loading {agency_id} trips...")
    trips_df = feed.trips
    trips_df['direction_id'] = trips_df['direction_id'].astype(str)

    print(f"Loading {agency_id} stop times...")
    stop_times_df =
feed.stop_times print(f"Loading {agency_id} shapes...") shapes_df = feed.shapes print(f"Loading {agency_id} stops...") stops_df = feed.stops # gtfs_stop_ids_map allows looking up row from stops.txt via GTFS stop_id gtfs_stop_ids_map = {stop.stop_id: stop for stop in stops_df.itertuples()} stop_id_gtfs_field = agency.stop_id_gtfs_field # get OpenTransit stop ID for GTFS stop_id (may be the same) def normalize_gtfs_stop_id(gtfs_stop_id): if stop_id_gtfs_field != 'stop_id': return getattr(gtfs_stop_ids_map[gtfs_stop_id], stop_id_gtfs_field) else: return gtfs_stop_id # stops_map allows looking up row from stops.txt via OpenTransit stop ID if stop_id_gtfs_field != 'stop_id': stops_map = {getattr(stop, stop_id_gtfs_field): stop for stop in stops_df.itertuples()} else: stops_map = gtfs_stop_ids_map if agency.provider == 'nextbus': nextbus_route_order = [route.id for route in nextbus.get_route_list(agency.nextbus_id)] for route in routes_df.itertuples(): gtfs_route_id = route.route_id short_name = route.route_short_name long_name = route.route_long_name if isinstance(short_name, str) and isinstance(long_name, str): title = f'{short_name} - {long_name}' elif isinstance(short_name, str): title = short_name else: title = long_name type = int(route.route_type) if hasattr(route, 'route_type') else None url = route.route_url if hasattr(route, 'route_url') and isinstance(route.route_url, str) else None #color = route.route_color #text_color = route.route_text_color route_id = getattr(route, agency.route_id_gtfs_field) if agency.provider == 'nextbus': route_id = route_id.replace('-', '_') # hack to handle muni route IDs where e.g. 
GTFS has "T-OWL" but nextbus has "T_OWL" try: nextbus_route_config = nextbus.get_route_config(agency.nextbus_id, route_id) title = nextbus_route_config.title except Exception as ex: print(ex) continue try: sort_order = nextbus_route_order.index(route_id) except ValueError as ex: print(ex) sort_order = None else: sort_order = int(route.route_sort_order) if hasattr(route, 'route_sort_order') else None print(f'route {route_id} {title}') route_data = { 'id': route_id, 'title': title, 'url': url, 'type': type, #'color': color, #'text_color': text_color, 'gtfs_route_id': gtfs_route_id, 'sort_order': sort_order, 'stops': {}, 'directions': [], } directions = [] route_directions_df = feed.get('route_directions.txt') # unofficial trimet gtfs extension if not route_directions_df.empty: route_directions_df = route_directions_df[route_directions_df['route_id'] == gtfs_route_id] else: route_directions_df = None routes_data.append(route_data) route_trips_df = trips_df[trips_df['route_id'] == gtfs_route_id] route_direction_id_values = route_trips_df['direction_id'].values def add_custom_direction(custom_direction_info): direction_id = custom_direction_info['id'] print(f' custom direction = {direction_id}') gtfs_direction_id = custom_direction_info['gtfs_direction_id'] direction_trips_df = route_trips_df[route_direction_id_values == gtfs_direction_id] included_stop_ids = custom_direction_info.get('included_stop_ids', []) excluded_stop_ids = custom_direction_info.get('excluded_stop_ids', []) shapes = get_unique_shapes( direction_trips_df=direction_trips_df, stop_times_df=stop_times_df, stops_map=stops_map, normalize_gtfs_stop_id=normalize_gtfs_stop_id ) def contains_included_stops(shape_stop_ids): min_index = 0 for stop_id in included_stop_ids: try: index = shape_stop_ids.index(stop_id, min_index) except ValueError: return False min_index = index + 1 # stops must appear in same order as in included_stop_ids return True def contains_excluded_stop(shape_stop_ids): for stop_id in 
excluded_stop_ids: try: index = shape_stop_ids.index(stop_id) return True except ValueError: pass return False matching_shapes = [] for shape in shapes: shape_stop_ids = shape['stop_ids'] if contains_included_stops(shape_stop_ids) and not contains_excluded_stop(shape_stop_ids): matching_shapes.append(shape) if len(matching_shapes) != 1: matching_shape_ids = [shape['shape_id'] for shape in matching_shapes] error_message = f'{len(matching_shapes)} shapes found for route {route_id} with
? AND preregister = 'Y'""", (tokenrow, block_begins)).fetchone() if advertisement: raise TokenError("An existing future advertisement allows preregistration") # available balance validation and update balance = self._get_balance(cursor, address) if units_avail: if balance < units_avail: raise TokenError("Insufficient available balance to make available") else: units_avail = balance if units_min: if units_min > units_avail: raise TokenError("Insufficient available balance for the specified minimum units") else: units_min = 1 if units_max: # note that it's not an error if units_max > units_avail... this allows a per-user maximum to be # set when units_avail might not be specified pass else: units_max = units_avail cursor.execute('''UPDATE balance SET updated = ?, available = available - ? WHERE token = ? AND address = ?''', (blockrow, units_avail, tokenrow, address)) # save advertise event cursor.execute('''INSERT INTO advertisement (tx, token, created, updated, begins, ends, delivers, available, dispensed, rate, minimum, maximum, preregister) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', (txrow, tokenrow, blockrow, blockrow, block_begin, block_end, block_deliver, units_avail, 0, units_min, units_max, 'Y' if preregister else None)) return cursor.lastrowid def token_advertise_cancel(self, address, txrow, blockrow, txhash): cursor = self.conn.cursor() tokenrow = self._get_tokenrow(cursor, address) # validate advertisement advertisement = cursor.execute('''SELECT a.rowid, a.token, a.finished, a.available, a.claimed FROM tx LEFT JOIN advertisement a ON a.tx = tx.rowid WHERE tx.hash = ?''', (txhash,)).fetchone() if not advertisement: raise TokenError("No advertisement exists for the given tx hash") if tokenrow != advertisement[1]: raise TokenError("Advertisement at the specified tx hash does not match the token indicated") if advertisement[2] is not None: raise TokenError("The advertisement has already finished") # validate registrations registrations = 
cursor.execute('''SELECT 1 FROM registration WHERE advertisement = ? LIMIT 1''', (advertisement[0],)) if registrations: #FIXME: just check that 'claimed' == 0 instead? raise TokenError("There have already been registrations for this advertisement; it cannot be cancelled") if advertisement[4] != 0: raise ValueError("This advertisement indicates claims but no registrations were found") # close advertisement and make balance available again cursor.execute('''UPDATE advertisement SET updated = ?, finished = ? WHERE rowid = ?''', (blockrow, blockrow, advertisement[0])) cursor.execute('''UPDATE balance SET updated = ?, available = available + ? WHERE token = ? AND address = ?''', (blockrow, advertisement[3], tokenrow, address)) return advertisement[0] def get_eligible_advertisement_row(self, cursor, tokenrow, height): advertisement = None advertisements = cursor.execute('''SELECT rowid FROM advertisement WHERE token = ? AND finished IS NULL AND begins <= ? AND (ends IS NULL OR ends >= ?)''', (tokenrow, height, height)).fetchall() if advertisements: if len(advertisements) > 1: raise ValueError("There are multiple active advertisements") advertisement = advertisements[0] advertisements = cursor.execute("""SELECT rowid FROM advertisement WHERE token = ? AND finished IS NULL AND begins > ? 
AND preregister = 'Y'""", (tokenrow, height)).fetchall() if advertisements: if advertisement: raise ValueError("There is an active advertisement but also a future advertisement allowing preregistration") if len(advertisements) > 1: raise ValueError("There are multiple future advertisements allowing preregistration") advertisement = advertisements[0] if not advertisement: raise TokenError("There is no active advertisement or future advertisement allowing preregistration") return advertisement[0] def token_register(self, address, txrow, blockrow, user_address, units_max=None): cursor = self.conn.cursor() tokenrow = self._get_tokenrow(cursor, address) height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0] advertisement = self.get_eligible_advertisement_row(cursor, tokenrow, height) advertisement = cursor.execute('''SELECT rowid, minimum, maximum, rate, available, claimed, delivers FROM advertisement WHERE rowid = ?''', (advertisement,)).fetchone() if units_max < advertisement[1]: raise TokenError('Specified maximum is less than the advertisement user-minimum required') registrations = cursor.execute('''SELECT SUM(maximum) FROM registration WHERE address = ? and advertisement = ?''', (user_address, advertisement[0])).fetchone() max_remains = advertisement[2] if registrations: max_remains -= registrations[0] if max_remains < 1: raise TokenError('Maximum per-user units has already been registered') unclaimed = advertisement[4] - advertisement[5] if unclaimed < max_remains: max_remains = unclaimed if units_max > max_remains: units_max = max_remains if not advertisement[3]: # free faucet units = units_max available = (height > advertisement[6]) # note that if height == delivers then process_advertisements() will make the units available # update source balance cursor.execute('''UPDATE balance SET updated = ?, units = units - ? WHERE token = ? 
AND address = ?''', (blockrow, units, tokenrow, address)) # update destination balance balance = self._get_balance(cursor, tokenrow, user_address) if balance is None: cursor.execute('''INSERT INTO balance (address, token, updated, units, available) VALUES (?, ?, ?, ?, ?)''', (user_address, tokenrow, blockrow, units, units if available else 0)) else: cursor.execute('''UPDATE balance SET updated = ?, units = units + ?, available = available + ? WHERE token = ? AND address = ?''', (blockrow, units, units if available else 0, tokenrow, user_address)) cursor.execute('''UPDATE advertisement SET updated = ?, claimed = claimed + ? WHERE rowid = ?''', (blockrow, units, advertisement[0])) # save transfer event cursor.execute('''INSERT INTO transfer (tx, created, addr_from, addr_to, units) VALUES (?, ?, ?, ?, ?)''', (txrow, blockrow, address, user_address, units)) else: units = 0 cursor.execute('''INSERT INTO registration (tx, address, advertisement, created, updated, finished, maximum, payments, claimed) VALUES (?, ?, ?, ?, ?, ?, ?, ?)''', (txrow, user_address, advertisement[0], blockrow, blockrow, blockrow if advertisement[3] else None, units_max, 0, units)) return cursor.lastrowid def token_unregister(self, address, txrow, blockrow, user_address): cursor = self.conn.cursor() tokenrow = self._get_tokenrow(cursor, address) height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0] advertisement = self.get_eligible_advertisement_row(cursor, tokenrow, height) registrations = cursor.execute('''SELECT rowid, token FROM registration WHERE address = ? AND advertisement = ? 
AND finished IS NULL''', (user_address, advertisement)).fetchall() if not registrations: raise TokenError("No active registration was found") if len(registrations) > 1: raise ValueError("Multiple active registrations found") registration = registrations[0] if registration[1] != tokenrow: raise ValueError("This registration token does not match the advertisement token") cursor.execute('''UPDATE registration SET updated = ?, finished = ? WHERE rowid = ?''', (blockrow, blockrow, registration[0])) return registration[0] def get_active_registrations_map(self, blockrow): cursor = self.conn.cursor() height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0] registrations = cursor.execute('''SELECT t.address, r.address, r.rowid FROM registration r LEFT JOIN advertisement a ON a.rowid = r.advertisement LEFT JOIN token t ON t.rowid = a.token WHERE r.finished IS NULL AND a.finished IS NULL AND a.begins <= ?''', (height,)).fetchall() reg_map = {} if registrations: for registration in registrations: try: records = reg_map[registration[0]] except AttributeError: records = {} reg_map[registration[0]] = records try: rowid = records[registration[1]] raise ValueError('Already have an active registration for this user and token') except AttributeError: records[registration[1]] = registration[2] return reg_map def registration_payment(self, txrow, blockrow, rowid, value): cursor = self.conn.cursor() height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0] details = cursor.execute('''SELECT r.address, r.maximum, r.payments, r.claimed, a.rowid, a.delivers, a.available, a.claimed, a.rate, a.minimum, a.maximum, t.rowid, t.address FROM registration r LEFT JOIN advertisement a ON a.rowid = r.advertisement LEFT JOIN token t ON t.rowid = a.token WHERE r.rowid = ?''', (rowid,)).fetchone() claimed = cursor.execute('''SELECT SUM(claimed) FROM registration WHERE address = ? AND advertisement = ? 
AND rowid <> ?''', (details[0], details[4], rowid)).fetchone()[0] ad_remaining = details[6] - details[7] user_remaining = details[10] - claimed - details[3] if ad_remaining < user_remaining: user_remaining = ad_remaining if details[1] < user_remaining: user_remaining = details[1] payments = details[2] + value rate = details[8] if rate: if rate < 0: units = payments // (-1 * rate) else: units = payments * rate if units < details[9]: units = 0 else: units -= details[3] if units > user_remaining: units = user_remaining else: units = user_remaining if units > 0: available = (height > details[5]) # note that if height == delivers then process_advertisements() will make the units available # update source balance cursor.execute('''UPDATE balance SET updated = ?, units = units - ? WHERE token = ? AND address = ?''', (blockrow, units, details[11], details[12])) # update destination balance balance = self._get_balance(cursor, details[11], details[0]) if balance is None: cursor.execute('''INSERT INTO balance (address, token, updated, units, available) VALUES (?, ?, ?, ?, ?)''', (details[0], details[11], blockrow, units, units if available else 0)) else: cursor.execute('''UPDATE balance SET updated = ?, units = units + ?, available = available + ? WHERE token = ? AND address = ?''', (blockrow, units, units if available else 0, details[11], details[0])) # save transfer
#
# Geographic geometry utility functions
#
# I tested geo-py but precision was inferior(?).
# I'd love to use pyturf but it does not have all the functions I need
# and it loads heavy packages.
# So I made the functions I need.
#
import math
import json

# Geodesy constants
R = 6371000            # Mean Earth radius, in metres
FT = 12 * 0.0254       # 1 foot = 12 inches = 0.3048 m
NAUTICAL_MILE = 1.852  # Kilometres per nautical mile (1 nm = 1852 m = 6076.118 ft)


def toNm(m):
    """Convert to nautical miles, rounded to the nearest integer.

    NOTE(review): NAUTICAL_MILE is expressed in kilometres, so this
    actually converts kilometres -> nm; the parameter name suggests
    metres. Confirm the expected unit with callers.
    """
    # Bug fix: math.round does not exist in Python; round() is a builtin.
    return round(m / NAUTICAL_MILE)


def toFeet(m):
    """Convert metres to feet, rounded to the nearest integer."""
    # Bug fix: math.round -> builtin round().
    return round(m / FT)


def toMeter(f):
    """Convert feet to metres, rounded to the nearest integer."""
    # Bug fix: the original referenced an undefined name `ft` (NameError)
    # and the non-existent math.round.
    return round(f * FT)


def toKn(kmh):
    """Convert kilometres per hour to knots."""
    return kmh / NAUTICAL_MILE


def toKmh(kn):
    """Convert knots to kilometres per hour."""
    return kn * NAUTICAL_MILE


def convertAngleTo360(alfa):
    """Normalize an angle in degrees into the range [0, 360)."""
    beta = alfa % 360
    if beta < 0:  # defensive: Python's % already yields a non-negative result here
        beta = beta + 360
    return beta


def turn(bi, bo):
    """Signed turn in degrees from bearing `bi` to bearing `bo`.

    The result is mapped into (-180, 180]; negative means a left turn.
    """
    t = bi - bo
    if t < 0:
        t += 360
    if t > 180:
        t -= 360
    return t


def sign(x):
    """Return -1, 0 or 1 (there is no sign function in the stdlib)."""
    if x < 0:
        return -1
    elif x > 0:
        return 1
    return 0


class Feature:
    """Base class for GeoJSON-style features carrying a property bag.

    Subclasses must set `self.geomType` and implement `coords(lonLat)`.
    """

    def __init__(self):
        self.properties = {}
        self.featureType = "Feature"

    def props(self):
        """Return the raw properties dict."""
        return self.properties

    def setProp(self, name, value):
        """Set a single property."""
        self.properties[name] = value

    def getProp(self, name):
        """Return a property value, or None when unset."""
        if name in self.properties.keys():
            return self.properties[name]
        return None

    def feature(self):
        """Return this object as a GeoJSON Feature dict (lon/lat order)."""
        return {
            "type": "Feature",
            "geometry": self.geom(True),
            "properties": self.props()
        }

    def geom(self, lonLat=False):
        """Return the geometry dict; lonLat selects coordinate order."""
        return {
            "type": self.geomType,
            "coordinates": self.coords(lonLat)
        }

    def __str__(self):
        return json.dumps(self.feature())


class FeatureCollection:
    """A GeoJSON FeatureCollection wrapper around a list of feature dicts."""

    def __init__(self, features=None):
        # Bug fix: the original used a mutable default argument ([]),
        # which is shared across all instances created without arguments.
        self.features = [] if features is None else features

    def featureCollection(self):
        """Return this object as a GeoJSON FeatureCollection dict."""
        return {
            "type": "FeatureCollection",
            "features": self.features
        }

    def __str__(self):
        return json.dumps(self.featureCollection())

    def save(self, filename):
        """Write the collection as pretty-printed JSON to `filename`."""
        # Use a context manager so the handle is closed even on error.
        with open(filename, 'w') as f:
            json.dump(self.featureCollection(), f, indent=2)


class Point(Feature):
    """A geographic point (latitude, longitude, optional altitude)."""

    def __init__(self, lat, lon, alt=0):
        Feature.__init__(self)
        self.geomType = "Point"
        self.lat = float(lat)
        self.lon = float(lon)
        self.alt = float(alt)
        self.properties["marker"] = None
        self.properties["marker-color"] = "#aaaaaa"
        self.properties["marker-size"] = "medium"

    def coords(self, lonLat=False):
        if lonLat:
            return [self.lon, self.lat]
        return [self.lat, self.lon]  # should be lon, lat for pure geojson.


class Line(Feature):
    """A two-point line segment between `start` and `end` Points."""

    def __init__(self, start, end):
        Feature.__init__(self)
        self.geomType = "LineString"
        self.start = start
        self.end = end
        self.properties["stroke"] = "#aaaaaa"
        self.properties["strokeWidth"] = 1
        self.properties["strokeOpacity"] = 1

    def coords(self, lonLat=False):
        return [self.start.coords(lonLat), self.end.coords(lonLat)]

    def length(self):
        """Great-circle length of the segment, in metres."""
        return distance(self.start, self.end)

    def bearing(self):
        """Initial bearing from start to end, in degrees."""
        return bearing(self.start, self.end)


class LineString(Feature):
    """A polyline built from a list of Points."""

    def __init__(self, points):
        Feature.__init__(self)
        self.geomType = "LineString"
        self.points = points
        self.properties["stroke"] = "#aaaaaa"
        self.properties["strokeWidth"] = 1
        self.properties["strokeOpacity"] = 1

    def coords(self, lonLat=False):
        return list(map(lambda x: x.coords(lonLat), self.points))

    def getLine(self, idx):
        """Return segment idx as a Line (or None past the end).

        Selected properties are copied from the segment's first point.
        """
        COPYPROPS = ["name", "edge"]
        if idx < len(self.points) - 1:
            l = Line(self.points[idx], self.points[idx + 1])
            for prop in COPYPROPS:
                if prop in self.points[idx].properties:
                    l.setProp(prop, self.points[idx].properties[prop])
            return l
        return None


class Polygon(Feature):
    """A polygon defined by a list of Points (single ring, unclosed)."""

    def __init__(self, p):
        Feature.__init__(self)
        self.geomType = "Polygon"
        self.coordinates = p
        self.properties["stroke"] = "#aaaaaa"
        self.properties["strokeWidth"] = 1
        self.properties["strokeOpacity"] = 1

    def coords(self, lonLat=False):
        return list(map(lambda x: x.coords(lonLat), self.coordinates))

    @staticmethod
    def mkPolygon(lat1, lon1, lat2, lon2, width):
        """Build a rectangle of `width` metres centred on the given segment."""
        p1 = Point(lat1, lon1)
        p2 = Point(lat2, lon2)
        brng = bearing(p1, p2)
        # one side of centerline
        brng = brng + 90
        a0 = destination(p1, brng, width / 2)
        a2 = destination(p2, brng, width / 2)
        # other side of centerline
        brng = brng - 90
        a1 = destination(p1, brng, width / 2)
        a3 = destination(p2, brng, width / 2)
        # join
        return Polygon([a0, a1, a3, a2])


def haversine(lat1, lat2, long1, long2):
    """Haversine term `a` for two positions; all arguments in radians."""
    dlat, dlong = lat2 - lat1, long2 - long1
    return math.pow(math.sin(dlat / 2), 2) \
        + math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlong / 2), 2)


def distance(p1, p2):
    """Great-circle distance between two Points (degrees in), in metres."""
    lat1, lat2 = math.radians(p1.lat), math.radians(p2.lat)
    long1, long2 = math.radians(p1.lon), math.radians(p2.lon)
    a = haversine(lat1, lat2, long1, long2)
    return 2 * R * math.asin(math.sqrt(a))  # in m


def bearing(src, dst):
    """Initial great-circle bearing from src to dst, in degrees [0, 360)."""
    lat1 = math.radians(src.lat)
    lon1 = math.radians(src.lon)
    lat2 = math.radians(dst.lat)
    lon2 = math.radians(dst.lon)
    y = math.sin(lon2 - lon1) * math.cos(lat2)
    x = math.cos(lat1) * math.sin(lat2) \
        - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)
    t = math.atan2(y, x)
    brng = convertAngleTo360(math.degrees(t))  # in degrees
    return brng


def destination(src, brngDeg, d):
    """Point reached travelling `d` metres from src on bearing brngDeg."""
    lat = math.radians(src.lat)
    lon = math.radians(src.lon)
    brng = math.radians(brngDeg)
    r = d / R  # angular distance
    lat2 = math.asin(math.sin(lat) * math.cos(r)
                     + math.cos(lat) * math.sin(r) * math.cos(brng))
    lon2 = lon + math.atan2(math.sin(brng) * math.sin(r) * math.cos(lat),
                            math.cos(r) - math.sin(lat) * math.sin(lat2))
    return Point(math.degrees(lat2), math.degrees(lon2))


def lineintersect(line1, line2):
    # Finds intersection of line1 and line2. Returns Point() of intersection or None.
    # !! Source code copied from GeoJSON code where coordinates are (longitude, latitude).
    x1 = line1.start.lon
    y1 = line1.start.lat
    x2 = line1.end.lon
    y2 = line1.end.lat
    x3 = line2.start.lon
    y3 = line2.start.lat
    x4 = line2.end.lon
    y4 = line2.end.lat
    denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
    numeA = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
    numeB = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
    if denom == 0:
        # Parallel (and, when numeA == numeB == 0, collinear); the original
        # had a dead duplicate branch here returning None in both cases.
        return None
    uA = numeA / denom
    uB = numeB / denom
    if 0 <= uA <= 1 and 0 <= uB <= 1:
        x = x1 + uA * (x2 - x1)
        y = y1 + uA * (y2 - y1)
        # return [x, y]  # x is longitude, y is latitude.
        return Point(y, x)
    return None


def nearestPointToLines(p, lines):
    """Find the nearest point to `p` on a collection of Line()s.

    Returns [nearest_point, distance_m]; nearest_point is None (and the
    distance math.inf) when no perpendicular from p meets any line.
    """
    nearest = None
    dist = math.inf
    for line in lines:
        d1 = distance(p, line.start)
        d2 = distance(p, line.end)
        dl = max(d1, d2)  # long enough to guarantee crossing, if any
        brng = bearing(line.start, line.end)
        brng += 90  # perpendicular
        p1 = destination(p, brng, dl)
        brng -= 180  # perpendicular, other side
        p2 = destination(p, brng, dl)
        perpendicular = Line(p1, p2)
        intersect = lineintersect(perpendicular, line)
        if intersect:
            d = distance(p, intersect)
            if d < dist:
                dist = d
                nearest = intersect
    # Bug fix: the original returned the `distance` function object
    # instead of the computed minimum distance.
    return [nearest, dist]


def pointInPolygon(point, polygon):
    # Ray-casting test; fine for very local geometry (500m around current
    # location) since it works on raw lat/lon without projection.
    pt = point.coords()
    pol = polygon.coords()
    inside = False
    for i in range(len(pol)):
        x0, y0 = pol[i]
        x1, y1 = pol[(i + 1) % len(pol)]
        if not min(y0, y1) < pt[1] <= max(y0, y1):
            continue
        if pt[0] < min(x0, x1):
            continue
        cur_x = x0 if x0 == x1 else x0 + (pt[1] - y0) * (x1 - x0) / (y1 - y0)
        inside ^= pt[0] > cur_x
    return inside


#
# Functions to smooth turns
#

# Module-level scratch list used by debugF() to collect debug features.
debugFeature = []


def debugF(f, n, c=None):
    """Tag feature f with name n (and optional color c) and collect it."""
    f.setProp("name", n)
    if c:
        f.setProp("stroke", c)
        f.setProp("marker-color", c)
    debugFeature.append(f)


def extendLine(line, dist):
    # Extends a line in each direction by dist meters.
    brng = bearing(line.start, line.end)
    far0 = destination(line.end, brng, dist)
    far1 = destination(line.start, brng + 180, dist)
    return Line(Point(far0.lat, far0.lon), Point(far1.lat, far1.lon))


def lineOffset(line, offset):
    # Returns a line parallel to supplied line at offset meter distance.
    # Offset should be small (< 10km). Negative offset puts the line
    # on the other side.
    brng = bearing(line.start, line.end)
    if offset > 0:
        brng -= 90
    else:
        brng += 90
    d = abs(offset)
    far0 = destination(line.start, brng, d)
    far1 = destination(line.end, brng, d)
    return Line(Point(far0.lat, far0.lon), Point(far1.lat, far1.lon))


def lineArc(center, radius, bearing1, bearing2, steps=36):
    # Make a linestring arc from bearing1 to bearing2, centered on center,
    # of radius radius. Angle step size set to 360/steps.
    angle1 = convertAngleTo360(bearing1)
    angle2 = convertAngleTo360(bearing2)
    arcStartDegree = angle1
    # When angle2 <= angle1 the arc wraps through north: sweep a full turn.
    arcEndDegree = angle2 + 360
    if angle1 < angle2:
        arcEndDegree = angle2
    coordinates = []
    rot = 360 / steps
    alfa = arcStartDegree
    while alfa < arcEndDegree:
        coordinates.append(destination(center, alfa, radius))
        alfa += rot
    # Bug fix: always close the arc on its exact end bearing; the original
    # guard (alfa > arcEndDegree) skipped the endpoint whenever the angular
    # span divided evenly by the step size.
    coordinates.append(destination(center, arcEndDegree, radius))
    return coordinates


def arcCenter(l0, l1, radius):
    # Returns arc center, always "inside" both lines l0 and l1.
    b_in = bearing(l0.start, l0.end)
    b_out = bearing(l1.start, l1.end)
    turnAngle = turn(b_in, b_out)
    oppositeTurnAngle = turn(b_out, b_in)
    # offset line is always on right side of line
    l0b = lineOffset(l0, sign(turnAngle) * radius)
    l1b = lineOffset(l1, sign(oppositeTurnAngle) * radius)
    return lineintersect(l0b, l1b)
#!/usr/bin/python # Copyright (c) 2011-2012 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Module that contains unittests for validation_pool module.""" import contextlib import copy import functools import itertools import mox import os import pickle import sys import time import constants sys.path.insert(0, constants.SOURCE_ROOT) from chromite.buildbot import cbuildbot_results as results_lib from chromite.buildbot import repository from chromite.buildbot import validation_pool from chromite.lib import cros_build_lib from chromite.lib import cros_test_lib from chromite.lib import gerrit from chromite.lib import patch as cros_patch from chromite.lib import patch_unittest _GetNumber = iter(itertools.count()).next class MockPatch(mox.MockObject): owner = '<EMAIL>' def __eq__(self, other): return self.id == getattr(other, 'id') def GetTestJson(change_id=None): """Get usable fake Gerrit patch json data Args: change_id: If given, force this ChangeId """ data = copy.deepcopy(patch_unittest.FAKE_PATCH_JSON) if change_id is not None: data['id'] = str(change_id) return data class MockManifest(object): def __init__(self, path, **kwds): self.root = path for key, attr in kwds.iteritems(): setattr(self, key, attr) def GetProjectPath(self, project, absolute=False): if absolute: return os.path.join(self.root, project) return project def GetProjectsLocalRevision(self, _project): return 'refs/remotes/cros/master' # pylint: disable=W0212,R0904 class base(cros_test_lib.MoxTestCase): def setUp(self): self.mox.StubOutWithMock(validation_pool, '_RunCommand') self.mox.StubOutWithMock(time, 'sleep') self.mox.StubOutWithMock(cros_build_lib, 'TreeOpen') # Supress all gerrit access; having this occur is generally a sign # the code is either misbehaving, or the tests are bad. 
self.mox.StubOutWithMock(gerrit.GerritHelper, 'Query') self.mox.StubOutWithMock(gerrit.GerritHelper, '_SqlQuery') self._patch_counter = (itertools.count(1)).next self.build_root = 'fakebuildroot' def MockPatch(self, change_id=None, patch_number=None, is_merged=False, project='chromiumos/chromite', remote=constants.EXTERNAL_REMOTE, tracking_branch='refs/heads/master', approval_timestamp=0): # pylint: disable=W0201 # We have to use a custom mock class to fix some brain behaviour of # pymox where multiple separate mocks can easily equal each other # (or not; the behaviour varies depending on stubs used). patch = MockPatch(cros_patch.GerritPatch) self.mox._mock_objects.append(patch) patch.remote = remote patch.internal = (remote == constants.INTERNAL_REMOTE) if change_id is None: change_id = self._patch_counter() patch.gerrit_number = str(change_id) # Strip off the leading 0x, trailing 'l' change_id = hex(change_id)[2:].rstrip('L').lower() patch.change_id = patch.id = 'I%s' % change_id.rjust(40, '0') patch.patch_number = (patch_number if patch_number is not None else _GetNumber()) patch.url = 'fake_url/%s' % (change_id,) patch.project = project patch.sha1 = hex(_GetNumber())[2:].rstrip('L').lower().rjust(40, '0') patch.IsAlreadyMerged = lambda:is_merged patch.LookupAliases = functools.partial( self._LookupAliases, patch) patch.tracking_branch = tracking_branch patch.approval_timestamp = approval_timestamp return patch @staticmethod def _LookupAliases(patch): return [getattr(patch, x) for x in ('change_id', 'sha1', 'gerrit_number') if hasattr(patch, x)] def GetPatches(self, how_many=1, **kwargs): l = [self.MockPatch(**kwargs) for _ in xrange(how_many)] if how_many == 1: return l[0] return l def MakeHelper(self, cros_internal=None, cros=None): # pylint: disable=W0201 if cros_internal: cros_internal = self.mox.CreateMock(gerrit.GerritHelper) cros_internal.version = '2.1' cros_internal.remote = constants.INTERNAL_REMOTE if cros: cros = 
self.mox.CreateMock(gerrit.GerritHelper) cros.remote = constants.EXTERNAL_REMOTE cros.version = '2.1' return validation_pool.HelperPool(cros_internal=cros_internal, cros=cros) # pylint: disable=W0212,R0904 class TestPatchSeries(base): """Tests the core resolution and applying logic of validation_pool.ValidationPool.""" def setUp(self): # All tests should set their content merging projects via # SetContentMergingProjects since FindContentMergingProjects # requires admin rights in gerrit. self.mox.StubOutWithMock(gerrit.GerritHelper, 'FindContentMergingProjects') @staticmethod def SetContentMergingProjects(series, projects=(), remote=constants.EXTERNAL_REMOTE): helper = series._helper_pool.GetHelper(remote) series._content_merging_projects[helper] = frozenset(projects) @contextlib.contextmanager def _ValidateTransactionCall(self, _changes): yield def GetPatchSeries(self, helper_pool=None, force_content_merging=False): if helper_pool is None: helper_pool = self.MakeHelper(cros_internal=True, cros=True) series = validation_pool.PatchSeries(self.build_root, helper_pool, force_content_merging) # Suppress transactions. 
series._Transaction = self._ValidateTransactionCall return series def assertPath(self, _patch, return_value, path): self.assertEqual(path, os.path.join(self.build_root, _patch.project)) if isinstance(return_value, Exception): raise return_value return return_value def assertGerritDependencies(self, _patch, return_value, path, tracking): self.assertEqual(tracking, 'refs/remotes/cros/master') return self.assertPath(_patch, return_value, path) def SetPatchDeps(self, patch, parents=(), cq=()): patch.GerritDependencies = functools.partial( self.assertGerritDependencies, patch, parents) patch.PaladinDependencies = functools.partial( self.assertPath, patch, cq) patch.Fetch = functools.partial( self.assertPath, patch, patch.sha1) def _ValidatePatchApplyManifest(self, value): self.assertTrue(isinstance(value, MockManifest)) self.assertEqual(value.root, self.build_root) return True def SetPatchApply(self, patch, trivial=True): return patch.ApplyAgainstManifest( mox.Func(self._ValidatePatchApplyManifest), trivial=trivial) def assertResults(self, series, changes, applied=(), failed_tot=(), failed_inflight=(), frozen=True, dryrun=False): # Convenience; set the content pool as necessary. 
for remote in set(x.remote for x in changes): helper = series._helper_pool.GetHelper(remote) series._content_merging_projects.setdefault(helper, frozenset()) manifest = MockManifest(self.build_root) result = series.Apply(changes, dryrun=dryrun, frozen=frozen, manifest=manifest) _GetIds = lambda seq:[x.id for x in seq] _GetFailedIds = lambda seq: _GetIds(x.patch for x in seq) applied_result = _GetIds(result[0]) failed_tot_result, failed_inflight_result = map(_GetFailedIds, result[1:]) applied = _GetIds(applied) failed_tot = _GetIds(failed_tot) failed_inflight = _GetIds(failed_inflight) self.assertEqual( [applied, failed_tot, failed_inflight], [applied_result, failed_tot_result, failed_inflight_result]) return result def testApplyWithDeps(self): """Test that we can apply changes correctly and respect deps. This tests a simple out-of-order change where change1 depends on change2 but tries to get applied before change2. What should happen is that we should notice change2 is a dep of change1 and apply it first. """ series = self.GetPatchSeries() patch1, patch2 = patches = self.GetPatches(2) self.SetPatchDeps(patch2) self.SetPatchDeps(patch1, [patch2.id]) self.SetPatchApply(patch2) self.SetPatchApply(patch1) self.mox.ReplayAll() self.assertResults(series, patches, [patch2, patch1]) self.mox.VerifyAll() def testSha1Deps(self): """Test that we can apply changes correctly and respect sha1 deps. This tests a simple out-of-order change where change1 depends on change2 but tries to get applied before change2. What should happen is that we should notice change2 is a dep of change1 and apply it first. 
""" series = self.GetPatchSeries() patch1, patch2, patch3 = patches = self.GetPatches(3) patch2.change_id = patch2.id = patch2.sha1 patch3.change_id = patch3.id = '*' + patch3.sha1 patch3.remote = constants.INTERNAL_REMOTE self.SetPatchDeps(patch1, [patch2.sha1]) self.SetPatchDeps(patch2, ['*%s' % patch3.sha1]) self.SetPatchDeps(patch3) self.SetPatchApply(patch2) self.SetPatchApply(patch3) self.SetPatchApply(patch1) self.mox.ReplayAll() self.assertResults(series, patches, [patch3, patch2, patch1]) self.mox.VerifyAll() def testGerritNumberDeps(self): """Test that we can apply changes correctly and respect gerrit number deps. This tests a simple out-of-order change where change1 depends on change2 but tries to get applied before change2. What should happen is that we should notice change2 is a dep of change1 and apply it first. """ series = self.GetPatchSeries() patch1, patch2, patch3 = patches = self.GetPatches(3) self.SetPatchDeps(patch3, cq=[patch1.gerrit_number]) self.SetPatchDeps(patch2, cq=[patch3.gerrit_number]) self.SetPatchDeps(patch1, cq=[patch2.id]) self.SetPatchApply(patch3) self.SetPatchApply(patch2) self.SetPatchApply(patch1) self.mox.ReplayAll() self.assertResults(series, patches, [patch1, patch2, patch3]) self.mox.VerifyAll() def testGerritLazyMapping(self): """Given a patch lacking a gerrit number, via gerrit, map it to that change. Literally, this ensures that local patches pushed up- lacking a gerrit number- are mapped back to a changeid via asking gerrit for that number, then the local matching patch is used if available. 
""" series = self.GetPatchSeries() patch1 = self.MockPatch() del patch1.gerrit_number del patch1.url patch2 = self.MockPatch(change_id=int(patch1.change_id[1:])) patch3 = self.MockPatch() self.SetPatchDeps(patch3, cq=[patch2.gerrit_number]) self.SetPatchDeps(patch2) self.SetPatchDeps(patch1) self.SetPatchApply(patch1) self.SetPatchApply(patch3) self._SetQuery(series, patch2, query=patch2.gerrit_number, is_parent=False).AndReturn(patch2) self.mox.ReplayAll() applied = self.assertResults(series, [patch1, patch3], [patch3, patch1])[0] self.assertIs(applied[0], patch3) self.assertIs(applied[1], patch1) self.mox.VerifyAll() def testCrosGerritDeps(self): """Test that we can apply changes correctly and respect deps. This tests a simple out-of-order change where change1 depends on change2 but tries to get applied before change2. What should happen is that we should notice change2 is a dep of change1 and apply it first. """ series = self.GetPatchSeries() patch1 = self.MockPatch(remote=constants.EXTERNAL_REMOTE) patch2 = self.MockPatch(remote=constants.INTERNAL_REMOTE) patch3 = self.MockPatch(remote=constants.EXTERNAL_REMOTE) patches = [patch3, patch2, patch1] self.SetPatchDeps(patch1) self.SetPatchDeps(patch2, cq=[patch1.id]) self.SetPatchDeps(patch3, cq=[patch2.id]) self.SetPatchApply(patch1) self.SetPatchApply(patch2) self.SetPatchApply(patch3) self.mox.ReplayAll() self.assertResults(series, patches, patches) self.mox.VerifyAll() @staticmethod def _SetQuery(series, change, is_parent=False, query=None): helper = series._helper_pool.GetHelper(change.remote) query = change.id if query is None else query if is_parent: query = "project:%s AND branch:%s AND %s" % ( change.project, os.path.basename(change.tracking_branch), query) return helper.QuerySingleRecord(query, must_match=True) def testApplyMissingDep(self): """Test that we don't try to apply a change without met dependencies. 
Patch2 is in the validation pool that depends on Patch1 (which is not) Nothing should get applied. """ series = self.GetPatchSeries() patch1, patch2 = self.GetPatches(2) self.SetPatchDeps(patch2, [patch1.id]) self._SetQuery(series, patch1, is_parent=True).AndReturn(patch1) self.mox.ReplayAll() self.assertResults(series, [patch2], [], [patch2]) self.mox.VerifyAll() def testApplyWithCommittedDeps(self): """Test that we apply a change with dependency already committed.""" series = self.GetPatchSeries() # Use for basic commit check. patch1 = self.GetPatches(1, is_merged=True) patch2 = self.GetPatches(1) self.SetPatchDeps(patch2, [patch1.id]) self._SetQuery(series, patch1, is_parent=True).AndReturn(patch1) self.SetPatchApply(patch2) # Used to ensure that an uncommitted change put in the lookup cache # isn't invalidly pulled into the graph... patch3, patch4, patch5 = self.GetPatches(3) self._SetQuery(series, patch3, is_parent=True).AndReturn(patch3) self.SetPatchDeps(patch4, [patch3.id]) self.SetPatchDeps(patch5, [patch3.id]) self.mox.ReplayAll() self.assertResults(series, [patch2, patch4, patch5], [patch2], [patch4, patch5]) self.mox.VerifyAll() def testCyclicalDeps(self): """Verify that the machinery handles cycles correctly.""" series = self.GetPatchSeries() patch1, patch2 = patches = self.GetPatches(2) self.SetPatchDeps(patch1, [patch1.id]) self.SetPatchDeps(patch2, cq=[patch1.id]) self.SetPatchApply(patch2) self.SetPatchApply(patch1) self.mox.ReplayAll() self.assertResults(series, patches, [patch2, patch1]) def testApplyPartialFailures(self): """Test that can apply changes correctly when one change fails to apply. This tests a simple change order where 1 depends on 2 and 1 fails to apply. Only 1 should get tried as 2 will abort once it sees that 1 can't be applied. 3 with no dependencies should go through fine. Since patch1 fails to apply, we should also get a call to handle the failure. """ series
self.device_type == 'server' or self.phy.device_type \ == 'server' def is_l3device(self): """Layer 3 devices: router, server, cloud, host ie not switch """ return self.is_router() or self.is_server() def __getitem__(self, key): """Get item key""" return OverlayNode(self.anm, key, self.node_id) @property def asn(self): """Returns ASN of this node""" #TODO: make a function (not property) try: return self._graph.node[self.node_id]['asn'] # not in this graph except KeyError: # try from phy try: return self.anm.overlay_nx_graphs['phy' ].node[self.node_id]['asn'] except KeyError: if self.node_id not in self.anm.overlay_nx_graphs['phy' ]: message = \ 'Node id %s not found in physical overlay' \ % self.node_id if self.overlay_id == 'input': # don't warn, most likely node not copied across log.debug(message) else: log.warning(message) return @asn.setter def asn(self, value): #TODO: make a function (not property) # TODO: double check this logic try: self.anm.overlay_nx_graphs['phy'].node[self.node_id]['asn' ] = value except KeyError: # set ASN directly on the node, eg for collision domains self._graph.node[self.node_id]['asn'] = value @property def id(self): """Returns node id""" return self.node_id @property def _overlay(self): """Access overlay graph for this node""" return OverlayGraph(self.anm, self.overlay_id) def degree(self): """Returns degree of node""" return self._graph.degree(self.node_id) def neighbors(self, *args, **kwargs): """Returns neighbors of node""" neighs = self._overlay.neighbors(self) return self._overlay.filter(neighs, *args, **kwargs) def neighbor_interfaces(self, *args, **kwargs): # TODO: implement filtering for args and kwargs if len(args) or len(kwargs): log.warning('Attribute-based filtering not currently supported for neighbor_interfaces' ) return iter(edge.dst_int for edge in self.edges()) @property #TODO: make a function to reflect dynamic nature: constructed from other attributes def label(self): """Returns node label (mapped from ANM)""" 
return self.__repr__() @property def phy(self): """Shortcut back to physical OverlayNode Same as node.overlay.phy ie node.phy.x is same as node.overlay.phy.x """ #TODO: do we need this with node['phy']? # refer back to the physical node, to access attributes such as name return OverlayNode(self.anm, 'phy', self.node_id) def dump(self): """Dump attributes of this node""" data = dict(self._graph.node[self.node_id]) del data['_interfaces'] return str(data) def edges(self, *args, **kwargs): """Edges to/from this node""" return list(self._overlay.edges(self, *args, **kwargs)) def __str__(self): return str(self.__repr__()) def __repr__(self): """Try label if set in overlay, otherwise from physical, otherwise node id""" try: return self.anm.node_label(self) except KeyError: try: return self._graph.node[self.node_id]['label'] except KeyError: return self.node_id # node not in physical graph def __getattr__(self, key): """Returns node property This is useful for accesing attributes passed through from graphml""" try: node_data = self._graph.node[self.node_id] except KeyError: #TODO: only carry out this logic if "Strict mode" if key == "device_type": #TODO: tidy accessors so this doesn't occur, and remove the suppress pass # supress return self.log.debug("Cannot access %s: node not present in %s" %(key, self.overlay_id)) return else: try: result = node_data[key] return result except KeyError: if key == "device_type": #TODO: tidy accessors so this doesn't occur, and remove the suppress pass # supress return # from http://stackoverflow.com/q/2654113 self.log.debug("Accessing unset attribute %s in %s" % (key, self.overlay_id)) #import inspect #caller = ", ".join(reversed([elem[3] for elem in inspect.stack()[1:-4]])) #self.log.debug("Caller: %s" % caller) return def get(self, key): """For consistency, node.get(key) is neater than getattr(node, key)""" return getattr(self, key) def __setattr__(self, key, val): """Sets node property This is useful for accesing attributes passed 
through from graphml""" # TODO: look at mapping the object __dict__ straight to the graph.node[self.node_id] # TODO: fix wrt using @x.setter won't work due to following: # as per # http://docs.python.org/2/reference/datamodel.html#customizing-attribute-access # TODO: fix workaround for asn if key == 'asn': object.__setattr__(self, 'asn', val) # calls @asn.setter try: self._graph.node[self.node_id][key] = val except KeyError: self._graph.add_node(self.node_id) self.set(key, val) def set(self, key, val): """For consistency, node.set(key, value) is neater than setattr(node, key, value)""" return self.__setattr__(key, val) @total_ordering class OverlayEdge(object): """API to access link in network""" def __init__( self, anm, overlay_id, src_id, dst_id, ): # Set using this method to bypass __setattr__ object.__setattr__(self, 'anm', anm) object.__setattr__(self, 'overlay_id', overlay_id) object.__setattr__(self, 'src_id', src_id) object.__setattr__(self, 'dst_id', dst_id) logger = logging.getLogger("ANK") logstring = "Interface: %s" % str(self) logger = CustomAdapter(logger, {'item': logstring}) object.__setattr__(self, 'log', logger) def __key(self): """Note: key doesn't include overlay_id to allow fast cross-layer comparisons""" # based on http://stackoverflow.com/q/2909106 return (self.src_id, self.dst_id) def __hash__(self): """""" return hash(self.__key()) def __eq__(self, other): """""" try: return (self.src_id, self.dst_id) == (other.src_id, other.dst_id) except AttributeError: return self.node_id == other def __repr__(self): """String of node""" return '%s: (%s, %s)' % (self.overlay_id, self.src, self.dst) def __getitem__(self, key): """""" overlay = OverlayGraph(self.anm, key) return overlay.edge(self) def __lt__(self, other): """""" return (self.src.node_id, self.dst.node_id) \ < (other.src.node_id, other.dst.node_id) @property def src(self): """Source node of edge""" return OverlayNode(self.anm, self.overlay_id, self.src_id) @property def dst(self): 
"""Destination node of edge""" return OverlayNode(self.anm, self.overlay_id, self.dst_id) def apply_to_interfaces(self, attribute): val = self.__getattr__(attribute) self.src_int.__setattr__(attribute, val) self.dst_int.__setattr__(attribute, val) @property def src_int(self): """Interface bound to source node of edge""" src_int_id = self._interfaces[self.src_id] return overlay_interface(self.anm, self.overlay_id, self.src_id, src_int_id) @property def dst_int(self): """Interface bound to destination node of edge""" dst_int_id = self._interfaces[self.dst_id] return overlay_interface(self.anm, self.overlay_id, self.dst_id, dst_int_id) #TODO: see if these are still used def attr_equal(self, *args): """Return edges which both src and dst have attributes equal""" return all(getattr(self.src, key) == getattr(self.dst, key) for key in args) def attr_both(self, *args): """Return edges which both src and dst have attributes set""" return all(getattr(self.src, key) and getattr(self.dst, key) for key in args) def attr_any(self, *args): """Return edges which either src and dst have attributes set""" return all(getattr(self.src, key) or getattr(self.dst, key) for key in args) def dump(self): return str(self._graph[self.src_id][self.dst_id]) def __nonzero__(self): """Allows for checking if edge exists """ return self._graph.has_edge(self.src_id, self.dst_id) def bind_interface(self, node, interface): """Bind this edge to specified index""" self._interfaces[node.id] = interface def interfaces(self): # TODO: warn if interface doesn't exist on node return iter(overlay_interface(self.anm, self.overlay_id, node_id, interface_id) for (node_id, interface_id) in self._interfaces.items()) @property def _graph(self): """Return graph the node belongs to""" return self.anm.overlay_nx_graphs[self.overlay_id] def get(self, key): """For consistency, edge.get(key) is neater than getattr(edge, key)""" return self.__getattr__(key) def set(self, key, val): """For consistency, edge.set(key, value) 
is neater than setattr(edge, key, value)""" return self.__setattr__(key, val) def __getattr__(self, key): """Returns edge property""" return self._graph[self.src_id][self.dst_id].get(key) def __setattr__(self, key, val): """Sets edge property""" self._graph[self.src_id][self.dst_id][key] = val class OverlayGraphData(object): """API to access link in network""" def __init__(self, anm, overlay_id): # Set using this method to bypass __setattr__ object.__setattr__(self, 'anm', anm) object.__setattr__(self, 'overlay_id', overlay_id) def __repr__(self): """""" return 'Data for (%s, %s)' % (self.anm, self.overlay_id) def dump(self): """""" print str(self._graph.graph) @property def _graph(self): # access underlying graph for this OverlayNode return self.anm.overlay_nx_graphs[self.overlay_id] def __getattr__(self, key): return self._graph.graph.get(key) def __setattr__(self, key, val): self._graph.graph[key] = val def __getitem__(self, key): """""" return self._graph.graph.get(key) def __setitem__(self, key, val): """""" self._graph.graph[key] = val class OverlayBase(object): '''Base class for overlays - overlay graphs, subgraphs, projections, etc''' def __init__(self, anm, overlay_id): """""" if overlay_id not in anm.overlay_nx_graphs: raise OverlayNotFound(overlay_id) #TODO: return False instead? 
self._anm = anm self._overlay_id = overlay_id logger = logging.getLogger("ANK") logstring = "Overlay: %s" % str(overlay_id) logger = CustomAdapter(logger, {'item': logstring}) object.__setattr__(self, 'log', logger) def __repr__(self): """""" return self._overlay_id @property def data(self): """Returns data stored on this overlay graph""" return OverlayGraphData(self._anm, self._overlay_id) def __contains__(self, n): """""" try: return n.node_id in self._graph except AttributeError: # try with node_id as a string return n in self._graph def interface(self, interface): """""" return overlay_interface(self._anm, self._overlay_id, interface.node_id, interface.interface_id) def edge(self, edge_to_find, dst_to_find=None): '''returns edge in this graph with same src and dst''' if isinstance(edge_to_find, OverlayEdge): src_id = edge_to_find.src dst_id = edge_to_find.dst # TODO: add MultiGraph support in terms of key here for (src, dst) in self._graph.edges_iter(src_id): if dst == dst_id: return OverlayEdge(self._anm, self._overlay_id, src, dst) # TODO: tidy this logic up try: src = edge_to_find dst = dst_to_find src.lower() dst.lower() if self._graph.has_edge(src, dst): return OverlayEdge(self._anm, self._overlay_id, src, dst) except AttributeError: pass # not strings except TypeError: pass try: if dst_to_find: src_id = edge_to_find.node_id search_id = dst_to_find.node_id else: log.warning("Searching by edge_id has been deprecated") except AttributeError: src_id = None search_id = edge_to_find for (src, dst) in self._graph.edges_iter(src_id): try: if (src, dst) == (src_id, search_id): # searching by nodes return OverlayEdge(self._anm, self._overlay_id, src, dst) except KeyError: pass # def __getitem__(self, key): """""" return self.node(key) def node(self, key): """Returns node based on name This is currently O(N). 
Could use a lookup table""" try: if key.node_id in self._graph: return OverlayNode(self._anm, self._overlay_id, key.node_id) except AttributeError: # doesn't have
nev_fname + '.hdf' if not os.path.isfile(nev_hdf_fname): # convert .nev file to hdf file using Blackrock's n2h5 utility subprocess.call(['n2h5', nev_fname, nev_hdf_fname]) else: nev_hdf_fname = nev_hdf_fname[0] try: nev_hdf = h5py.File(nev_hdf_fname, 'r') open_method = 1 except: import tables nev_hdf = tables.openFile(nev_hdf_fname) open_method = 2 #print 'open method 2' n_bins = len(interp_rows) n_units = units.shape[0] spike_counts = np.zeros((n_bins, n_units)) for i in range(n_units): chan = units[i, 0] # 1-based numbering (comes from web interface) unit = units[i, 1] chan_str = str(chan).zfill(5) path = 'channel/channel%s/spike_set' % chan_str if open_method == 1: ts = nev_hdf.get(path).value['TimeStamp'] # the units corresponding to each timestamp in ts # 0-based numbering (comes from .nev file), so add 1 units_ts = nev_hdf.get(path).value['Unit'] elif open_method == 2: try: grp = nev_hdf.getNode('/'+path) ts = grp[:]['TimeStamp'] units_ts = grp[:]['Unit'] except: print(('no spikes recorded on channel: ', chan_str, ': adding zeros')) ts = [] units_ts = [] # get the ts for this unit, in units of secs fs = 30000. 
ts = [t/fs for idx, (t, u_t) in enumerate(zip(ts, units_ts)) if u_t == unit] # insert value interp_rows[0]-step to beginning of interp_rows array interp_rows_ = np.insert(interp_rows, 0, interp_rows[0]-step) # use ts to fill in the spike_counts that corresponds to unit i spike_counts[:, i] = np.histogram(ts, interp_rows_)[0] # discard units that never fired at all if 'keep_zero_units' in extractor_kwargs: print('keeping zero firing units') else: unit_inds, = np.nonzero(np.sum(spike_counts, axis=0)) units = units[unit_inds,:] spike_counts = spike_counts[:, unit_inds] extractor_kwargs['units'] = units return spike_counts, units, extractor_kwargs elif 'ripple' in files: nev_fname = [name for name in files['ripple'] if '.nev' in name][0] # only one of them nevfile = pyns.NSFile(nev_fname) # interpolate between the rows to 180 Hz if binlen < 1./strobe_rate: interp_rows = [] neurows = np.hstack([neurows[0] - 1./strobe_rate, neurows]) for r1, r2 in zip(neurows[:-1], neurows[1:]): interp_rows += list(np.linspace(r1, r2, 4)[1:]) interp_rows = np.array(interp_rows) else: step = int(binlen/(1./strobe_rate)) # Downsample kinematic data according to decoder bin length (assumes non-overlapping bins) interp_rows = neurows[::step] electrode_list = units[:,0] # first column is electrode numbers # access spike data for all electrodes indicated in units array spike_entities = [e for e in nevfile.get_entities() if e.entity_type ==3] spike_entities = [e for e in spike_entities if int(e.label[4:]) in electrode_list] print('checkpoint1') print(units) print(units.shape[0]) print(electrode_list) # there is one entity per electrode. now extract spike times and ids to do binning. 
# spike_counts should be units x time n_bins = len(interp_rows) n_units = units.shape[0] spike_counts = np.zeros((n_bins, n_units)) # insert value interp_rows[0]-step to beginning of interp_rows array interp_rows_ = np.insert(interp_rows, 0, interp_rows[0]-step) i = 0 print('checkpoint2') print(n_bins) print(interp_rows_) for entity in spike_entities: # placeholder matrix: spike count x 2, holds spike time in first column and spike id in second column spike_data = np.zeros((entity.item_count, 2)) elec = int(entity.label[4:]) # electrode number elec_uids = units[units[:,0]==elec,1] # units on this electrode to be included print(entity) print(elec) print(elec_uids) for item in range(0,entity.item_count): spike_data[item,0], data, spike_data[item,1] = entity.get_segment_data(item) # check which spike data will be used for uid in elec_uids: ts = spike_data[spike_data[:,1]==uid,0] print(ts) spike_counts[:, i] = np.histogram(ts, interp_rows_)[0] i += 1 # discard units that never fired at all if 'keep_zero_units' in extractor_kwargs: print('keeping zero firing units') else: print(np.sum(spike_counts, axis=0)) unit_inds, = np.nonzero(np.sum(spike_counts, axis=0)) print(unit_inds) units = units[unit_inds,:] spike_counts = spike_counts[:, unit_inds] extractor_kwargs['units'] = units print('File Extractor.py') print(units) return spike_counts, units, extractor_kwargs elif 'tdt' in files: raise NotImplementedError # bands should be a list of tuples representing ranges # e.g., bands = [(0, 10), (10, 20), (130, 140)] for 0-10, 10-20, and 130-140 Hz start = 0 end = 150 step = 10 default_bands = [] for freq in range(start, end, step): default_bands.append((freq, freq+step)) class LFPMTMPowerExtractor(object): ''' Computes log power of the LFP in different frequency bands (for each channel) in freq-domain using the multi-taper method. 
''' feature_type = 'lfp_power' def __init__(self, source, channels=[], bands=default_bands, win_len=0.2, NW=3, fs=1000, **kwargs): ''' Constructor for LFPMTMPowerExtractor, which extracts LFP power using the multi-taper method Parameters ---------- source : riglib.source.Source object Object which yields new data when its 'get' method is called channels : list LFP electrode indices to use for feature extraction bands : list of tuples Each tuple defines a frequency band of interest as (start frequency, end frequency) Returns ------- LFPMTMPowerExtractor instance ''' #self.feature_dtype = ('lfp_power', 'f8', (len(channels)*len(bands), 1)) self.source = source self.channels = channels self.bands = bands self.win_len = win_len self.NW = NW if source is not None: self.fs = source.source.update_freq else: self.fs = fs extractor_kwargs = dict() extractor_kwargs['channels'] = self.channels extractor_kwargs['bands'] = self.bands extractor_kwargs['win_len'] = self.win_len extractor_kwargs['NW'] = self.NW extractor_kwargs['fs'] = self.fs extractor_kwargs['no_log'] = 'no_log' in kwargs and kwargs['no_log']==True #remove log calculation extractor_kwargs['no_mean'] = 'no_mean' in kwargs and kwargs['no_mean']==True #r self.extractor_kwargs = extractor_kwargs self.n_pts = int(self.win_len * self.fs) self.nfft = 2**int(np.ceil(np.log2(self.n_pts))) # nextpow2(self.n_pts) fft_freqs = np.arange(0., fs, float(fs)/self.nfft)[:int(self.nfft/2) + 1] #fft_freqs = np.arange(0., fs, float(fs)/self.nfft)[:self.nfft/2 + 1] self.fft_inds = dict() for band_idx, band in enumerate(bands): self.fft_inds[band_idx] = [freq_idx for freq_idx, freq in enumerate(fft_freqs) if band[0] <= freq < band[1]] extractor_kwargs['fft_inds'] = self.fft_inds extractor_kwargs['fft_freqs'] = fft_freqs self.epsilon = 1e-9 if extractor_kwargs['no_mean']: #Used in lfp 1D control task self.feature_dtype = ('lfp_power', 'f8', (len(channels)*len(fft_freqs), 1)) else: self.feature_dtype = ('lfp_power', 'f8', 
(len(channels)*len(bands), 1)) def get_cont_samples(self, *args, **kwargs): ''' Retreives the last n_pts number of samples for each LPF channel from the neural data 'source' Parameters ---------- *args, **kwargs : optional arguments Ignored for this extractor (not necessary) Returns ------- np.ndarray of shape ??? ''' return self.source.get(self.n_pts, self.channels) def extract_features(self, cont_samples): ''' Extract spectral features from a block of time series samples Parameters ---------- cont_samples : np.ndarray of shape (n_channels, n_samples) Raw voltage time series (one per channel) from which to extract spectral features Returns ------- lfp_power : np.ndarray of shape (n_channels * n_features, 1) Multi-band power estimates for each channel, for each band specified when the feature extractor was instantiated. ''' psd_est = tsa.multi_taper_psd(cont_samples, Fs=self.fs, NW=self.NW, jackknife=False, low_bias=True, NFFT=self.nfft)[1] if ('no_mean' in self.extractor_kwargs) and (self.extractor_kwargs['no_mean'] is True): return psd_est.reshape(psd_est.shape[0]*psd_est.shape[1], 1) else: # compute average power of each band of interest n_chan = len(self.channels) lfp_power = np.zeros((n_chan * len(self.bands), 1)) for idx, band in enumerate(self.bands): if self.extractor_kwargs['no_log']: lfp_power[idx*n_chan : (idx+1)*n_chan, 0] = np.mean(psd_est[:, self.fft_inds[idx]], axis=1) else: lfp_power[idx*n_chan : (idx+1)*n_chan, 0] = np.mean(np.log10(psd_est[:, self.fft_inds[idx]] + self.epsilon), axis=1) return lfp_power def __call__(self, start_time, *args, **kwargs): ''' Parameters ---------- start_time : float Absolute time from the task event loop. This is unused by LFP extractors in their current implementation and only passed in to ensure that function signatures are the same across extractors. *args, **kwargs : optional positional/keyword arguments These are passed to the source, or ignored (not needed for this extractor). 
Returns ------- dict Extracted features to be saved in the task. ''' cont_samples = self.get_cont_samples(*args, **kwargs) # dims of channels x time print(cont_samples) lfp_power = self.extract_features(cont_samples['samples']) return dict(lfp_power=lfp_power) @classmethod def extract_from_file(cls, files, neurows, binlen, units, extractor_kwargs, strobe_rate=60.0): ''' Compute binned spike count features Parameters ---------- files : dict Data files used to train the decoder. Should contain exactly one type of neural data file (e.g., Plexon, Blackrock, TDT) neurows: np.ndarray of shape (T,) Timestamps in the plexon time reference corresponding to bin boundaries binlen: float Length of time over which to sum spikes from the specified cells units: np.ndarray of shape (N, 2) List of units that the decoder will be trained on. The first column specifies the electrode number and the second specifies the unit on the electrode extractor_kwargs: dict Any additional parameters to be passed to the feature extractor. This function is agnostic to the actual extractor utilized strobe_rate: 60.0 The rate at which the task sends the sync pulse to the plx file Returns ------- spike_counts : np.ndarray of shape (N, T) Spike counts binned over the length of the datafile. units : Not used by this type of extractor, just passed back from the input argument
not None} request_kwargs = { "url": url, "params": params, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.get( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = [] _response200 = response.json() for response200_item_data in _response200: response200_item = TeamMember.parse_obj(response200_item_data) response200.append(response200_item) return response200 return response def add_team_member( self, team_id: str, *, json_body: Union[AddTeamMemberJsonBody, Dict], ) -> TeamMember: """Add user to team Add user to the team by user_id. Permissions: Must be authenticated and team be open to add self. For adding another user, authenticated user must have the `add_user_to_team` permission. Api Reference: `AddTeamMember <https://api.mattermost.com/#operation/AddTeamMember>`_ """ url = f"/teams/{team_id}/members" if isinstance(json_body, BaseModel): json_json_body = json_body.dict(exclude_unset=True) else: json_json_body = json_body request_kwargs = { "url": url, "json": json_json_body, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.post( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 201: response201 = TeamMember.parse_obj(response.json()) return response201 return response def add_team_member_from_invite( self, *, token: str, ) -> TeamMember: """Add user to team from invite Using either an invite id or hash/data pair from an email invite link, add a user to a team. Permissions: Must be authenticated. 
Api Reference: `AddTeamMemberFromInvite <https://api.mattermost.com/#operation/AddTeamMemberFromInvite>`_ """ url = "/teams/members/invite" params: Dict[str, Any] = { "token": token, } params = {k: v for k, v in params.items() if v is not None} request_kwargs = { "url": url, "params": params, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.post( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 201: response201 = TeamMember.parse_obj(response.json()) return response201 return response def add_team_members( self, team_id: str, *, json_body: Union[List[TeamMember], Dict], graceful: Optional[bool] = None, ) -> List[TeamMember]: """Add multiple users to team Add a number of users to the team by user_id. Permissions: Must be authenticated. Authenticated user must have the `add_user_to_team` permission. Api Reference: `AddTeamMembers <https://api.mattermost.com/#operation/AddTeamMembers>`_ """ url = f"/teams/{team_id}/members/batch" params: Dict[str, Any] = { "graceful": graceful, } params = {k: v for k, v in params.items() if v is not None} json_json_body = [] for json_body_item_data in json_body: if isinstance(json_body_item_data, BaseModel): json_body_item = json_body_item_data.dict(exclude_unset=True) else: json_body_item = json_body_item_data json_json_body.append(json_body_item) request_kwargs = { "url": url, "json": json_json_body, "params": params, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.post( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 201: response201 = [] _response201 = response.json() for response201_item_data in _response201: response201_item = TeamMember.parse_obj(response201_item_data) response201.append(response201_item) return response201 return response def get_team_members_for_user( self, user_id: str, ) -> 
List[TeamMember]: """Get team members for a user Get a list of team members for a user. Useful for getting the ids of teams the user is on and the roles they have in those teams. Permissions: Must be logged in as the user or have the `edit_other_users` permission. Api Reference: `GetTeamMembersForUser <https://api.mattermost.com/#operation/GetTeamMembersForUser>`_ """ url = f"/users/{user_id}/teams/members" request_kwargs = { "url": url, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.get( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = [] _response200 = response.json() for response200_item_data in _response200: response200_item = TeamMember.parse_obj(response200_item_data) response200.append(response200_item) return response200 return response def get_team_member( self, team_id: str, user_id: str, ) -> TeamMember: """Get a team member Get a team member on the system. Permissions: Must be authenticated and have the `view_team` permission. Api Reference: `GetTeamMember <https://api.mattermost.com/#operation/GetTeamMember>`_ """ url = f"/teams/{team_id}/members/{user_id}" request_kwargs = { "url": url, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.get( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = TeamMember.parse_obj(response.json()) return response200 return response def remove_team_member( self, team_id: str, user_id: str, ) -> StatusOK: """Remove user from team Delete the team member object for a user, effectively removing them from a team. Permissions: Must be logged in as the user or have the `remove_user_from_team` permission. 
Api Reference: `RemoveTeamMember <https://api.mattermost.com/#operation/RemoveTeamMember>`_ """ url = f"/teams/{team_id}/members/{user_id}" request_kwargs = { "url": url, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.delete( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = StatusOK.parse_obj(response.json()) return response200 return response def get_team_members_by_ids( self, team_id: str, *, json_body: Union[List[str], Dict], ) -> List[TeamMember]: """Get team members by ids Get a list of team members based on a provided array of user ids. Permissions: Must have `view_team` permission for the team. Api Reference: `GetTeamMembersByIds <https://api.mattermost.com/#operation/GetTeamMembersByIds>`_ """ url = f"/teams/{team_id}/members/ids" json_json_body = json_body request_kwargs = { "url": url, "json": json_json_body, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.post( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = [] _response200 = response.json() for response200_item_data in _response200: response200_item = TeamMember.parse_obj(response200_item_data) response200.append(response200_item) return response200 return response def get_team_stats( self, team_id: str, ) -> TeamStats: """Get a team stats Get a team stats on the system. Permissions: Must be authenticated and have the `view_team` permission. 
Api Reference: `GetTeamStats <https://api.mattermost.com/#operation/GetTeamStats>`_ """ url = f"/teams/{team_id}/stats" request_kwargs = { "url": url, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.get( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = TeamStats.parse_obj(response.json()) return response200 return response def regenerate_team_invite_id( self, team_id: str, ) -> Team: """Regenerate the Invite ID from a Team Regenerates the invite ID used in invite links of a team Permissions: Must be authenticated and have the `manage_team` permission. Api Reference: `RegenerateTeamInviteId <https://api.mattermost.com/#operation/RegenerateTeamInviteId>`_ """ url = f"/teams/{team_id}/regenerate_invite_id" request_kwargs = { "url": url, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.post( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = Team.parse_obj(response.json()) return response200 return response def get_team_icon( self, team_id: str, ) -> None: """Get the team icon Get the team icon of the team. Permissions: User must be authenticated. In addition, team must be open or the user must have the `view_team` permission. Minimum Server Version: 4.9 Api Reference: `GetTeamIcon <https://api.mattermost.com/#operation/GetTeamIcon>`_ """ url = f"/teams/{team_id}/image" request_kwargs = { "url": url, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.get( **request_kwargs, ) if self.skip_response_parsing: return response return response def set_team_icon( self, team_id: str, *, multipart_data: Union[SetTeamIconMultipartData, Dict], ) -> StatusOK: """Sets the team icon Sets the team icon for the team. 
Permissions: Must be authenticated and have the `manage_team` permission. Minimum Server Version: 4.9 Api Reference: `SetTeamIcon <https://api.mattermost.com/#operation/SetTeamIcon>`_ """ url = f"/teams/{team_id}/image" multipart_body_data = SetTeamIconMultipartData.parse_obj(multipart_data) request_kwargs = { "url": url, "data": multipart_body_data.get_data(), "files": multipart_body_data.get_files(), } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.post( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = StatusOK.parse_obj(response.json()) return response200 return response def remove_team_icon( self, team_id: str, ) -> StatusOK: """Remove the team icon Remove the team icon for the team. Permissions: Must be authenticated and have the `manage_team` permission. Minimum Server Version: 4.10 Api Reference: `RemoveTeamIcon <https://api.mattermost.com/#operation/RemoveTeamIcon>`_ """ url = f"/teams/{team_id}/image" request_kwargs = { "url": url, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.delete( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = StatusOK.parse_obj(response.json()) return response200 return response def update_team_member_roles( self, team_id: str, user_id: str, *, json_body: Union[UpdateTeamMemberRolesJsonBody, Dict], ) -> StatusOK: """Update a team member roles Update a team member roles. Valid team roles are \"team_user\", \"team_admin\" or both of them. Overwrites any previously assigned team roles. Permissions: Must be authenticated and have the `manage_team_roles` permission. 
Api Reference: `UpdateTeamMemberRoles <https://api.mattermost.com/#operation/UpdateTeamMemberRoles>`_ """ url = f"/teams/{team_id}/members/{user_id}/roles" if isinstance(json_body, BaseModel): json_json_body = json_body.dict(exclude_unset=True) else: json_json_body = json_body request_kwargs = { "url": url, "json": json_json_body, } # pylint: disable-next=protected-access with self.client._get_httpx_client() as httpx_client: response = httpx_client.put( **request_kwargs, ) if self.skip_response_parsing: return response if response.status_code == 200: response200 = StatusOK.parse_obj(response.json()) return response200 return response def update_team_member_scheme_roles( self, team_id: str, user_id: str, *, json_body: Union[UpdateTeamMemberSchemeRolesJsonBody, Dict], ) -> StatusOK: """Update the scheme-derived roles of a team member. Update a team member's scheme_admin/scheme_user properties. Typically this should either be `scheme_admin=false, scheme_user=true` for ordinary team member, or `scheme_admin=true, scheme_user=true` for a team admin. Permissions: Must be authenticated and have the `manage_team_roles` permission. Minimum Server Version: 5.0
import json
import pickle
import numpy as np
import random
import sys
import torch
import argparse
import os
from model_pretrain import Plain_bert
from fairseq.models.roberta import RobertaModel
from fairseq import utils as fairseq_utils
import utils_pretrain as utils
import torch.nn as nn
import math
from fairseq.data import (
    data_utils,
    Dictionary,
    IdDataset,
    MaskTokensDataset,
    NestedDictionaryDataset,
    NumelDataset,
    NumSamplesDataset,
    PadDataset,
    PrependTokenDataset,
    SortDataset,
    TokenBlockDataset,
)
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from apex.parallel import DistributedDataParallel as DDP
import apex
from apex import amp
import torch.multiprocessing as mp
import torch.distributed as dist
import pynvml

# Seed every RNG we rely on so runs are reproducible.
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)

metrics = ['group_auc', 'mean_mrr', 'ndcg@5;10']
lr = 1e-4                # peak learning rate of the warm-up/decay schedule
T_warm = 10000           # linear warm-up iterations
all_iteration = 1000000  # total iterations of the linear decay


def parse_args(parser):
    """Register every command-line option on *parser* and parse sys.argv.

    The options fall into two groups: data/training bookkeeping options, and
    the transformer architecture options (originally a separate
    ``parse_args_model`` that was commented into this function).

    NOTE(review): the help strings of the data/training options were all
    copy-pasted as "local_rank for distributed training on gpus" in the
    original; they are preserved verbatim here.
    """
    _txt = 'local_rank for distributed training on gpus'
    # Data/training options, registered in the original order.
    for _name in ('--data_dir', '--save_dir', '--data_file',
                  '--test_data_file', '--feature_file', '--test_feature_file'):
        parser.add_argument(_name, type=str, help=_txt)
    for _name in ('--world_size', '--gpu_size', '--valid_size', '--batch_size'):
        parser.add_argument(_name, type=int, default=1, help=_txt)
    for _name in ('--log_file', '--field', '--model_file'):
        parser.add_argument(_name, type=str, help=_txt)
    for _name in ('--batch_t', '--iteration', '--epoch'):
        parser.add_argument(_name, type=int, default=1, help=_txt)
    parser.add_argument('--batch_one_epoch', type=int, help=_txt)
    parser.add_argument('--use_start_pos', action='store_true',
                        help='apply layernorm before each encoder block')

    # Transformer architecture options.
    parser.add_argument('--activation-fn',
                        choices=fairseq_utils.get_available_activation_fns(),
                        help='activation function to use')
    parser.add_argument('--dropout', type=float, metavar='D',
                        help='dropout probability')
    parser.add_argument('--attention-dropout', type=float, metavar='D',
                        help='dropout probability for attention weights')
    parser.add_argument('--activation-dropout', '--relu-dropout', type=float,
                        metavar='D',
                        help='dropout probability after activation in FFN.')
    parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                        help='path to pre-trained encoder embedding')
    parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                        help='encoder embedding dimension')
    parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                        help='encoder embedding dimension for FFN')
    parser.add_argument('--encoder-layers', type=int, metavar='N',
                        help='num encoder layers')
    parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                        help='num encoder attention heads')
    parser.add_argument('--encoder-normalize-before', action='store_true',
                        help='apply layernorm before each encoder block')
    parser.add_argument('--encoder-learned-pos', action='store_true',
                        help='use learned positional embeddings in the encoder')
    parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                        help='path to pre-trained decoder embedding')
    parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                        help='decoder embedding dimension')
    parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                        help='decoder embedding dimension for FFN')
    parser.add_argument('--decoder-layers', type=int, metavar='N',
                        help='num decoder layers')
    parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                        help='num decoder attention heads')
    parser.add_argument('--decoder-learned-pos', action='store_true',
                        help='use learned positional embeddings in the decoder')
    parser.add_argument('--decoder-normalize-before', action='store_true',
                        help='apply layernorm before each decoder block')
    parser.add_argument('--decoder-output-dim', type=int, metavar='N',
                        help='decoder output dimension (extra linear layer '
                             'if different from decoder embed dim')
    parser.add_argument('--share-decoder-input-output-embed',
                        action='store_true',
                        help='share decoder input and output embeddings')
    parser.add_argument('--share-all-embeddings', action='store_true',
                        help='share encoder, decoder and output embeddings'
                             ' (requires shared dictionary and embed dim)')
    parser.add_argument('--no-token-positional-embeddings', default=False,
                        action='store_true',
                        help='if set, disables positional embeddings (outside self attention)')
    parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                        help='comma separated list of adaptive softmax cutoff points. '
                             'Must be used with adaptive_loss criterion')
    parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                        help='sets adaptive softmax dropout for the tail projections')
    parser.add_argument('--layernorm-embedding', action='store_true',
                        help='add layernorm to embedding')
    parser.add_argument('--no-scale-embedding', action='store_true',
                        help='if True, dont scale embeddings')
    # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
    parser.add_argument('--no-cross-attention', default=False, action='store_true',
                        help='do not perform cross-attention')
    parser.add_argument('--cross-self-attention', default=False, action='store_true',
                        help='perform cross+self-attention')
    # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
    parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
                        help='LayerDrop probability for encoder')
    parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
                        help='LayerDrop probability for decoder')
    parser.add_argument('--encoder-layers-to-keep', default=None,
                        help='which layers to *keep* when pruning as a comma-separated list')
    parser.add_argument('--decoder-layers-to-keep', default=None,
                        help='which layers to *keep* when pruning as a comma-separated list')
    # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
    parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
                        help='iterative PQ quantization noise at training time')
    parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
                        help='block size of quantization noise at training time')
    parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
                        help='scalar quantization noise and scalar quantization at training time')
    return parser.parse_args()
#decoder_output_dim 不确定 #max_target_positions #no_scale_embedding #adaptive_input #quant_noise_pq #quant_noise_pq #quant_noise_pq_block_size #decoder_learned_pos 不确定 #no_token_positional_embeddings 不确定 #decoder_layers 不确定 #decoder_normalize_before 不确定但感觉应该是True #tie_adaptive_weights #adaptive_softmax_cutoff #adaptive_softmax_dropout #adaptive_softmax_factor #tie_adaptive_proj setattr(args, "encoder_embed_path", None) setattr(args, "encoder_embed_dim", 768) setattr(args, "encoder_ffn_embed_dim", 3072) setattr(args, "encoder_layers", 12) setattr(args, "encoder_attention_heads", 12) setattr(args, "encoder_normalize_before", True) setattr(args, "encoder_learned_pos", True) setattr(args, "decoder_embed_path", None) setattr(args, "decoder_embed_dim", args.encoder_embed_dim) setattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) #setattr(args, "decoder_layers", 12) setattr(args, "decoder_attention_heads", 12) setattr(args, "decoder_normalize_before", True) setattr(args, "decoder_learned_pos", True) setattr(args, "attention_dropout", 0.1) setattr(args, "activation_dropout", 0.0) setattr(args, "activation_fn", "relu") setattr(args, "dropout", 0.1) setattr(args, "adaptive_softmax_cutoff", None) setattr(args, "adaptive_softmax_dropout", 0) setattr( args, "share_decoder_input_output_embed", True ) setattr(args, "share_all_embeddings", True) setattr( args, "no_token_positional_embeddings", False ) setattr(args, "adaptive_input", False) setattr(args, "no_cross_attention", False) setattr(args, "cross_self_attention", False) setattr( args, "decoder_output_dim", args.decoder_embed_dim ) setattr(args, "decoder_input_dim", args.decoder_embed_dim) setattr(args, "no_scale_embedding", True) setattr(args, "layernorm_embedding", True) setattr(args, "tie_adaptive_weights", True)#不确定啊 print('???',args.encoder_embed_dim) def adjust_learning_rate(optimizer,iteration,lr=lr, T_warm=T_warm, all_iteration=all_iteration ):#得看一些一共有多少个iteration再确定 if iteration<=T_warm: 
lr=lr*float(iteration)/T_warm elif iteration<all_iteration: lr = lr * (1 - (iteration - T_warm) / (all_iteration - T_warm)) else: lr=0 for param_group in optimizer.param_groups: param_group['lr'] = lr def test(model,args,mlm_data,roberta_dict,decode_data,rerank): print('test...') cudaid=0 #model = nn.DataParallel(model, device_ids=list(range(args.world_size))) step=0 accum_batch_loss=0 accum_batch_loss_decode=0 accum_batch_loss_mask=0 accumulation_steps=0 batch_t=0 with torch.no_grad(): data_batch=utils.get_batch(mlm_data,roberta_dict,args.valid_size,decode_dataset=decode_data,rerank=rerank,mode='valid') for token_list, mask_label_list, decode_label_list in data_batch: #batch_t+=1 #assert candidate_id.shape[1]==2 # his_id=his_id.cuda(cudaid) # candidate_id= candidate_id.cuda(cudaid) # label = label.cuda(cudaid) # loss=model(his_id,candidate_id, label) batch_t+=token_list.shape[0] token_list=token_list.cuda(cudaid) mask_label_list=mask_label_list.cuda(cudaid) decode_label_list=decode_label_list.cuda(cudaid) loss_mask,sample_size_mask,loss_decode,sample_size_decode=model(token_list,mask_label_list,decode_label_list) loss_mask=loss_mask/sample_size_mask/math.log(2) loss_decode=loss_decode/sample_size_decode/math.log(2) loss=loss_mask+loss_decode # print('loss: ',loss,' sample_size: ',sample_size) # assert 1==0 accum_batch_loss+=float(loss) accum_batch_loss_mask+=float(loss_mask) accum_batch_loss_decode+=float(loss_decode) accumulation_steps+=1 if accumulation_steps%100==0: print('batch_t: ',batch_t) return accum_batch_loss/accumulation_steps, accum_batch_loss_mask/accumulation_steps, accum_batch_loss_decode/accumulation_steps def train(cudaid, args,model,roberta_dict,rerank): #pynvml.nvmlInit() dist.init_process_group( backend='nccl', init_method='env://', world_size=args.world_size, rank=cudaid) random.seed(1) np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) print('params: '," T_warm: ",T_warm," all_iteration: ",all_iteration," lr: ",lr) 
def train(cudaid, args, model, roberta_dict, rerank):
    """Distributed pretraining loop for one process/GPU (rank *cudaid*).

    Sets up NCCL, FusedLAMB + apex AMP (O2) + DDP, then trains with gradient
    accumulation; rank 0 logs to TensorBoard, runs validation every 5000
    optimizer steps, and checkpoints.

    NOTE(review): reconstructed from whitespace-mangled source; the exact
    indentation of the accumulator resets was ambiguous and is placed at the
    optimizer-step level.
    """
    dist.init_process_group(backend='nccl', init_method='env://',
                            world_size=args.world_size, rank=cudaid)
    random.seed(1)
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)
    print('params: ', " T_warm: ", T_warm, " all_iteration: ", all_iteration,
          " lr: ", lr)
    print('rank: ', cudaid)
    torch.cuda.set_device(cudaid)
    model.cuda(cudaid)
    # Optimizer steps happen every `accumulation_steps` micro-batches.
    accumulation_steps = int(args.batch_size / args.world_size / args.gpu_size)
    optimizer = apex.optimizers.FusedLAMB(
        model.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-6,
        weight_decay=0.01, max_grad_norm=1.0)
    model, optimizer = amp.initialize(model, optimizer, opt_level='O2')
    model = DDP(model)
    mlm_data = utils.load_mask_data(
        os.path.join(args.data_dir, 'data-bin-body1_3/train'), roberta_dict)
    decode_data = utils.load_decode_data(
        os.path.join(args.data_dir, 'data-bin-abs1_3/train'), roberta_dict)

    accum_batch_loss = 0
    accum_batch_loss_decode = 0
    accum_batch_loss_mask = 0
    all_batch_loss = 0
    all_batch_loss_decode = 0
    all_batch_loss_mask = 0
    print('train...', args.field)
    if cudaid == 0:
        writer = SummaryWriter(os.path.join(args.data_dir, args.log_file))
    epoch = 0
    epoch_o = 0
    model.train()
    batch_t = 0
    iteration = 0
    step = 0
    best_score = -1  # kept from the original; unused below
    step_t = 0       # kept from the original; unused below
    start_pos = None
    batch_t_arg = 0

    # When resuming from a checkpoint, fast-forward the counters and
    # optionally the data stream position.
    if args.model_file != None:
        epoch_o = args.epoch
        iteration = args.iteration
        step = int(iteration / 10000) + 1
        if args.use_start_pos:
            start_pos = args.gpu_size * batch_t * 2 % (
                int((32255176 - int(0.002 * 32255176)) / args.world_size) + 1)
            batch_t_arg = args.batch_t
            batch_t = args.batch_t
        elif args.batch_one_epoch != None:
            batch_t_arg = args.batch_t % args.batch_one_epoch
        else:
            batch_t_arg = args.batch_t
        print('???', batch_t_arg, args.batch_t)

    for epoch in range(epoch_o, 20):
        all_loss = 0
        all_batch = 0
        data_batch = utils.get_batch(
            mlm_data, roberta_dict, args.gpu_size, decode_dataset=decode_data,
            rerank=rerank, mode='train', dist=True, cudaid=cudaid,
            size=args.world_size, start_pos=start_pos)
        start_pos = None  # later epochs restart from the beginning
        for token_list, mask_label_list, decode_label_list in data_batch:
            # Skip batches already consumed before a resume.
            if epoch == epoch_o and batch_t < batch_t_arg:
                batch_t += 1
                continue
            batch_t += 1
            token_list = token_list.cuda(cudaid)
            mask_label_list = mask_label_list.cuda(cudaid)
            decode_label_list = decode_label_list.cuda(cudaid)
            loss_mask, sample_size_mask, loss_decode, sample_size_decode = \
                model(token_list, mask_label_list, decode_label_list)
            # Normalize per sample (bits); guard against empty sample sets.
            if sample_size_mask != 0:
                loss_mask = loss_mask / sample_size_mask / math.log(2)
            if sample_size_decode != 0:
                loss_decode = loss_decode / sample_size_decode / math.log(2)
            loss = loss_mask + loss_decode

            accum_batch_loss += float(loss)
            accum_batch_loss_mask += float(loss_mask)
            accum_batch_loss_decode += float(loss_decode)
            all_batch_loss += float(loss)
            all_batch_loss_mask += float(loss_mask)
            all_batch_loss_decode += float(loss_decode)
            all_loss += float(loss)
            all_batch += 1

            loss = loss / accumulation_steps
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()

            if (batch_t) % accumulation_steps == 0:
                iteration += 1
                adjust_learning_rate(optimizer, iteration)
                optimizer.step()
                optimizer.zero_grad()
                if cudaid == 0:
                    print(' batch_t: ', batch_t, ' iteration: ', iteration,
                          ' epoch: ', epoch,
                          ' accum_batch_loss: ', accum_batch_loss / accumulation_steps,
                          ' accum_batch_loss_mask: ', accum_batch_loss_mask / accumulation_steps,
                          ' accum_batch_loss_decode: ', accum_batch_loss_decode / accumulation_steps,
                          ' lr: ', optimizer.param_groups[0]['lr'])
                    writer.add_scalar('Loss/train',
                                      accum_batch_loss / accumulation_steps, iteration)
                    writer.add_scalar('Loss_mask/train',
                                      accum_batch_loss_mask / accumulation_steps, iteration)
                    writer.add_scalar('Loss_decode/train',
                                      accum_batch_loss_decode / accumulation_steps, iteration)
                    writer.add_scalar('Loss_all/train',
                                      all_batch_loss / batch_t, iteration)
                    writer.add_scalar('Loss_mask_all/train',
                                      all_batch_loss_mask / batch_t, iteration)
                    writer.add_scalar('Loss_decode_all/train',
                                      all_batch_loss_decode / batch_t, iteration)
                    writer.add_scalar('Ltr/train',
                                      optimizer.param_groups[0]['lr'], iteration)
                accum_batch_loss = 0
                accum_batch_loss_mask = 0
                accum_batch_loss_decode = 0

                # Periodic validation + checkpoint on rank 0 only.
                if iteration % 5000 == 0 and cudaid == 0:
                    torch.cuda.empty_cache()
                    model.eval()
                    accum_batch_loss_t, accum_batch_loss_mask_t, accum_batch_loss_decode_t = \
                        test(model, args, mlm_data, roberta_dict, decode_data, rerank)
                    print('valid loss: ', accum_batch_loss_t,
                          accum_batch_loss_mask_t, accum_batch_loss_decode_t)
                    writer.add_scalar('Loss/valid', accum_batch_loss_t, step)
                    writer.add_scalar('Loss_mask/valid', accum_batch_loss_mask_t, step)
                    writer.add_scalar('Loss_decode/valid', accum_batch_loss_decode_t, step)
                    step += 1
                    torch.save(model.state_dict(),
                               os.path.join(args.save_dir,
                                            'pretrain_iteration' + str(iteration) + '.pkl'))
                    torch.cuda.empty_cache()
                    model.train()
        if cudaid == 0:
            torch.save(model.state_dict(),
                       os.path.join(args.save_dir,
                                    'doc_roberta' + str(epoch) + '.pkl'))
print(pretrained_dict.keys(),len(pretrained_dict.keys())) # model_dict.update(pretrained_dict) # model.load_state_dict(model_dict) if args.model_file !=None: model_dict = model.state_dict() model_file=os.path.join(args.save_dir,args.model_file) save_model=torch.load(model_file, map_location=lambda
<gh_stars>0 # <Copyright 2022, Argo AI, LLC. Released under the MIT license.> """Implements a pinhole camera interface.""" from __future__ import annotations from dataclasses import dataclass from functools import cached_property from pathlib import Path from typing import Tuple, Union import numpy as np import av2.geometry.geometry as geometry_utils import av2.utils.io as io_utils from av2.geometry.se3 import SE3 from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayInt @dataclass(frozen=True) class Intrinsics: """Models a camera intrinsic matrix. Args: fx_px: Horizontal focal length in pixels. fy_px: Vertical focal length in pixels. cx_px: Horizontal focal center in pixels. cy_px: Vertical focal center in pixels. width_px: Width of image in pixels. height_px: Height of image in pixels. """ fx_px: float fy_px: float cx_px: float cy_px: float width_px: int height_px: int @cached_property def K(self) -> NDArrayFloat: """Camera intrinsic matrix.""" K: NDArrayFloat = np.eye(3, dtype=float) K[0, 0] = self.fx_px K[1, 1] = self.fy_px K[0, 2] = self.cx_px K[1, 2] = self.cy_px return K @dataclass(frozen=True) class PinholeCamera: """Parameterizes a pinhole camera with zero skew. Args: ego_SE3_cam: pose of camera in the egovehicle frame (inverse of extrinsics matrix). intrinsics: `Intrinsics` object containing intrinsic parameters and image dimensions. cam_name: sensor name that camera parameters correspond to. 
""" ego_SE3_cam: SE3 intrinsics: Intrinsics cam_name: str @property def width_px(self) -> int: """Return the width of the image in pixels.""" return self.intrinsics.width_px @property def height_px(self) -> int: """Return the height of the image in pixels.""" return self.intrinsics.height_px @cached_property def extrinsics(self) -> NDArrayFloat: """Return the camera extrinsics.""" return self.ego_SE3_cam.inverse().transform_matrix @classmethod def from_feather(cls, log_dir: Path, cam_name: str) -> PinholeCamera: """Create a pinhole camera model from a feather file. Note: Data is laid out with sensor names along row dimension, and columns are sensor attribute data. Args: log_dir: path to a log directory containing feather files w/ calibration info. cam_name: name of the camera. Returns: A new PinholeCamera object, containing camera extrinsics and intrinsics. """ intrinsics_path = log_dir / "calibration" / "intrinsics.feather" intrinsics_df = io_utils.read_feather(intrinsics_path).set_index("sensor_name") params = intrinsics_df.loc[cam_name] intrinsics = Intrinsics( fx_px=params["fx_px"], fy_px=params["fy_px"], cx_px=params["cx_px"], cy_px=params["cy_px"], width_px=int(params["width_px"]), height_px=int(params["height_px"]), ) sensor_name_to_pose = io_utils.read_ego_SE3_sensor(log_dir) return cls( ego_SE3_cam=sensor_name_to_pose[cam_name], intrinsics=intrinsics, cam_name=cam_name, ) def cull_to_view_frustum(self, uv: NDArrayFloat, points_cam: NDArrayFloat) -> NDArrayBool: """Cull 3d points to camera view frustum. Given a set of coordinates in the image plane and corresponding points in the camera coordinate reference frame, determine those points that have a valid projection into the image. 3d points with valid projections have x coordinates in the range [0,width_px-1], y-coordinates in the range [0,height_px-1], and a positive z-coordinate (lying in front of the camera frustum). 
Ref: https://en.wikipedia.org/wiki/Hidden-surface_determination#Viewing-frustum_culling Args: uv: Numpy array of shape (N,2) representing image plane coordinates in [0,W-1] x [0,H-1] where (H,W) are the image height and width. points_cam: Numpy array of shape (N,3) representing corresponding 3d points in the camera coordinate frame. Returns: Numpy boolean array of shape (N,) indicating which points fall within the camera view frustum. """ is_valid_x = np.logical_and(0 <= uv[:, 0], uv[:, 0] < self.width_px - 1) is_valid_y = np.logical_and(0 <= uv[:, 1], uv[:, 1] < self.height_px - 1) is_valid_z = points_cam[:, 2] > 0 is_valid_points: NDArrayBool = np.logical_and.reduce([is_valid_x, is_valid_y, is_valid_z]) return is_valid_points def project_ego_to_img( self, points_ego: NDArrayFloat, remove_nan: bool = False ) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]: """Project a collection of 3d points (provided in the egovehicle frame) to the image plane. Args: points_ego: numpy array of shape (N,3) representing points in the egovehicle frame. remove_nan: whether to remove coordinates that project to invalid (NaN) values. Returns: uv: image plane coordinates, as Numpy array of shape (N,2). points_cam: camera frame coordinates as Numpy array of shape (N,3) representing is_valid_points: boolean indicator of valid cheirality and within image boundary, as boolean Numpy array of shape (N,). """ # convert cartesian to homogeneous coordinates. points_ego_hom = geometry_utils.cart_to_hom(points_ego) points_cam: NDArrayFloat = self.extrinsics @ points_ego_hom.T # remove bottom row of all 1s. 
uv = self.intrinsics.K @ points_cam[:3, :] uv = uv.T points_cam = points_cam.T if remove_nan: uv, points_cam = remove_nan_values(uv, points_cam) uv = uv[:, :2] / uv[:, 2].reshape(-1, 1) is_valid_points = self.cull_to_view_frustum(uv, points_cam) return uv, points_cam, is_valid_points def project_cam_to_img( self, points_cam: NDArrayFloat, remove_nan: bool = False ) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]: """Project a collection of 3d points in the camera reference frame to the image plane. Args: points_cam: numpy array of shape (N,3) representing points in the egovehicle frame. remove_nan: whether to remove coordinates that project to invalid (NaN) values. Returns: uv: image plane coordinates, as Numpy array of shape (N,2). points_cam: camera frame coordinates as Numpy array of shape (N,3) representing is_valid_points: boolean indicator of valid cheirality and within image boundary, as boolean Numpy array of shape (N,). """ uv = self.intrinsics.K @ points_cam[:3, :] uv = uv.T points_cam = points_cam.T if remove_nan: uv, points_cam = remove_nan_values(uv, points_cam) uv = uv[:, :2] / uv[:, 2].reshape(-1, 1) is_valid_points = self.cull_to_view_frustum(uv, points_cam) return uv, points_cam, is_valid_points def project_ego_to_img_motion_compensated( self, points_lidar_time: NDArrayFloat, city_SE3_ego_cam_t: SE3, city_SE3_ego_lidar_t: SE3, ) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]: """Project points in the ego frame to the image with motion compensation. Because of the high frame rate, motion compensation's role between the sensors is not very significant, moving points only by millimeters to centimeters. If the vehicle is moving at 25 miles per hour, equivalent to 11 meters/sec, then in 17 milliseconds (the max time between a lidar sweep and camera image capture) we should be able to move up to 187 millimeters. 
This can be verified in practice as the mean_change: mean_change = np.amax(points_h_cam_time.T[:,:3] - points_h_lidar_time ,axis=0) Adjust LiDAR points for egovehicle motion. This function accepts the egovehicle's pose in the city map both at camera time and also at the LiDAR time. We perform the following transformation: pt_egovehicle_cam_t = egovehicle_cam_t_SE3_city * city_SE3_egovehicle_lidar_t * pt_egovehicle_lidar_t Note that both "cam_time_points_h" and "lidar_time_points_h" are 3D points in the vehicle coordinate frame, but captured at different times. These LiDAR points always live in the vehicle frame, but just in different timestamps. If we take a lidar point in the egovehicle frame, captured at lidar time, and bring it into the map at this lidar timestamp, then we know the transformation from map to egovehicle reference frame at the time when the camera image was captured. Thus, we move from egovehicle @ lidar time, to the map (which is time agnostic), then we move from map to egovehicle @ camera time. Now we suddenly have lidar points living in the egovehicle frame @ camera time. Args: points_lidar_time: Numpy array of shape (N,3) city_SE3_ego_cam_t: egovehicle pose when camera image was recorded. city_SE3_ego_lidar_t: egovehicle pose when LiDAR sweep was recorded. Returns: uv: image plane coordinates, as Numpy array of shape (N,2). points_cam: Numpy array of shape (N,3) representing coordinates of points within the camera frame. is_valid_points_cam: boolean indicator of valid cheirality and within image boundary, as boolean Numpy array of shape (N,). Raises: ValueError: If `city_SE3_ego_cam_t` or `city_SE3_ego_lidar_t` is `None`. 
""" if city_SE3_ego_cam_t is None: raise ValueError("city_SE3_ego_cam_t cannot be `None`!") if city_SE3_ego_lidar_t is None: raise ValueError("city_SE3_ego_lidar_t cannot be `None`!") ego_cam_t_SE3_ego_lidar_t = city_SE3_ego_cam_t.inverse().compose(city_SE3_ego_lidar_t) points_cam_time = ego_cam_t_SE3_ego_lidar_t.transform_point_cloud(points_lidar_time) return self.project_ego_to_img(points_cam_time) @cached_property def right_clipping_plane(self) -> NDArrayFloat: """Form the right clipping plane for a camera view frustum. Returns: (4,) tuple of Hessian normal coefficients. """ a, b, c, d = -self.intrinsics.fx_px, 0.0, self.width_px / 2.0, 0.0 coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore return coeffs @cached_property def left_clipping_plane(self) -> NDArrayFloat: """Form the left clipping plane for a camera view frustum. Returns: (4,) tuple of Hessian normal coefficients. """ a, b, c, d = self.intrinsics.fx_px, 0.0, self.width_px / 2.0, 0.0 coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore return coeffs @cached_property def top_clipping_plane(self) -> NDArrayFloat: """Top clipping plane for a camera view frustum. Returns: (4,) tuple of Hessian normal coefficients. """ a, b, c, d = 0.0, self.intrinsics.fx_px, self.height_px / 2.0, 0.0 coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore return coeffs @cached_property def bottom_clipping_plane(self) -> NDArrayFloat: """Bottom clipping plane for a camera view frustum.
def assemble_padded_obj_masklist(lrtlist, scorelist, Z, Y, X, coeff=1.0):
    """Compute a binary 3D mask in memory coordinates for each object box.

    Used when computing the center-surround objectness score.

    Args:
        lrtlist: B x N x 19 boxes (3 box lengths + flattened 4x4 ref_T_obj).
        scorelist: B x N per-box scores; the masks are scaled by these.
        Z, Y, X: memory-grid resolution.
        coeff: multiplicative padding applied to the box lengths.

    Returns:
        masklist shaped B x N x 1 x Z x Y x X.
    """
    B, N, D = list(lrtlist.shape)
    assert(D==19)
    lenlist, ref_T_objlist = utils_geom.split_lrtlist(lrtlist)
    # lenlist is B x N x 3; ref_T_objlist is B x N x 4 x 4
    lenlist_ = lenlist.reshape(B*N, 3)
    ref_T_objlist_ = ref_T_objlist.reshape(B*N, 4, 4)
    obj_T_reflist_ = utils_geom.safe_inverse(ref_T_objlist_)
    # we want a value for each location in the mem grid
    xyz_mem_ = gridcloud3D(B*N, Z, Y, X)
    # this is B*N x V x 3, where V = Z*Y*X
    xyz_ref_ = Mem2Ref(xyz_mem_, Z, Y, X)
    # this is B*N x V x 3
    lx, ly, lz = torch.unbind(lenlist_, dim=1)  # these are B*N
    xyz_obj_ = utils_geom.apply_4x4(obj_T_reflist_, xyz_ref_)
    x, y, z = torch.unbind(xyz_obj_, dim=2)  # these are B*N x V
    lx = lx.unsqueeze(1)*coeff
    ly = ly.unsqueeze(1)*coeff
    lz = lz.unsqueeze(1)*coeff
    # these are B*N x 1
    # Fix: use bool comparisons instead of the deprecated .byte() casts; the
    # result is identical once cast to float below. Also dropped the original
    # unused `masks = torch.zeros(B, N, Z, Y, X)` allocation.
    x_valid = (x > -lx/2.0) & (x < lx/2.0)
    y_valid = (y > -ly/2.0) & (y < ly/2.0)
    z_valid = (z > -lz/2.0) & (z < lz/2.0)
    inbounds = x_valid & y_valid & z_valid
    masklist = inbounds.float().reshape(B, N, 1, Z, Y, X)
    masklist = masklist*scorelist.view(B, N, 1, 1, 1, 1)
    return masklist

def get_zoom_T_ref(lrt, Z, Y, X, additive_pad=0.0):
    """Build the (nonrigid) transform from ref coords to "zoom" coords, i.e.
    the box-normalized voxel grid of resolution Z x Y x X.

    Args:
        lrt: B x 19 box (3 lengths + flattened 4x4 ref_T_obj).
        Z, Y, X: zoom-grid resolution.
        additive_pad: additive padding on the box lengths; helps avoid NaNs
            with invalid (zero-size) objects.

    Returns:
        zoom_T_ref: B x 4 x 4.
    """
    B, E = list(lrt.shape)
    assert(E==19)
    lens, ref_T_obj = utils_geom.split_lrt(lrt)
    lx, ly, lz = lens.unbind(1)
    obj_T_ref = utils_geom.safe_inverse(ref_T_obj)  # B x 4 x 4
    # a tiny bit of padding; additive helps avoid nans with invalid objects
    lx = lx + additive_pad
    ly = ly + additive_pad*0.5  # pad less in this dim, since it is usually pointless
    lz = lz + additive_pad
    # translation: move the box center to the corner
    center_T_obj_r = utils_geom.eye_3x3(B)
    center_T_obj_t = torch.stack([lx/2., ly/2., lz/2.], dim=1)
    center_T_obj = utils_geom.merge_rt(center_T_obj_r, center_T_obj_t)
    # scaling: box lengths -> voxel units
    Z_VOX_SIZE_X = (lx)/float(X)
    Z_VOX_SIZE_Y = (ly)/float(Y)
    Z_VOX_SIZE_Z = (lz)/float(Z)
    # Fix: follow the input tensor's device instead of hard-coding cuda, so
    # the function also works on CPU tensors (identical behavior on cuda).
    diag = torch.stack([1./Z_VOX_SIZE_X,
                        1./Z_VOX_SIZE_Y,
                        1./Z_VOX_SIZE_Z,
                        torch.ones([B], device=lrt.device)],
                       axis=1).view(B, 4)
    zoom_T_center = torch.diag_embed(diag)
    # compose: zoom <- center <- obj <- ref
    zoom_T_obj = utils_basic.matmul2(zoom_T_center, center_T_obj)
    zoom_T_ref = utils_basic.matmul2(zoom_T_obj, obj_T_ref)
    return zoom_T_ref

def get_ref_T_zoom(lrt, Z, Y, X, additive_pad=0.1):
    """Inverse of get_zoom_T_ref; lrt is B x 19.

    Note: safe_inverse is inapplicable here, since the transform is nonrigid.
    """
    zoom_T_ref = get_zoom_T_ref(lrt, Z, Y, X, additive_pad=additive_pad)
    ref_T_zoom = zoom_T_ref.inverse()
    return ref_T_zoom

def Ref2Zoom(xyz_ref, lrt_ref, Z, Y, X, additive_pad=0.1):
    """Transform points from ref coordinates into zoom coordinates.

    Args:
        xyz_ref: B x N x 3 points in ref coordinates.
        lrt_ref: B x 19 box in ref coordinates.
    """
    B, N, _ = list(xyz_ref.shape)
    zoom_T_ref = get_zoom_T_ref(lrt_ref, Z, Y, X, additive_pad=additive_pad)
    xyz_zoom = utils_geom.apply_4x4(zoom_T_ref, xyz_ref)
    return xyz_zoom

def Zoom2Ref(xyz_zoom, lrt_ref, Z, Y, X, additive_pad=0.1):
    """Transform points from zoom coordinates back into ref coordinates.

    Args:
        xyz_zoom: B x N x 3 points in zoom coordinates.
        lrt_ref: B x 19 box in ref coordinates (the original comment said
            "B x 9", which contradicts the 19-float lrt format used above).
    """
    B, N, _ = list(xyz_zoom.shape)
    ref_T_zoom = get_ref_T_zoom(lrt_ref, Z, Y, X, additive_pad=additive_pad)
    xyz_ref = utils_geom.apply_4x4(ref_T_zoom, xyz_zoom)
    return xyz_ref
Ref2Mem(xyz_ref, Z, Y, X) zoom = utils_samp.sample3D(mem, xyz_mem, Z2, Y2, X2) zoom = torch.reshape(zoom, [B, C, Z2, Y2, X2]) return zoom def center_mem_on_xyz(mem, xyz, Z2, Y2, X2): # mem is B x C x Z x Y x X # xyz is B x 3 B, C, Z, Y, X = list(mem.shape) B2, D = list(xyz.shape) assert(D==3) assert(B==B2) # from the xyz i'll make a fat lrt # then call crop_zoom_from_mem xyzlist = xyz.unsqueeze(1) # B x 1 x 3 lenlist = torch.ones_like(xyzlist)*10.0 # 10m cube rotlist = torch.zeros_like(xyzlist) # no rot boxlist = torch.cat([xyzlist, lenlist, rotlist], dim=2) # boxlist is B x 1 x 9 lrtlist = utils_geom.convert_boxlist_to_lrtlist(boxlist) lrt = lrtlist.squeeze(1) # lrt is B x 19 return crop_zoom_from_mem(mem, lrt, Z2, Y2, X2, additive_pad=0.0) def assemble(bkg_feat0, obj_feat0, origin_T_camRs, camRs_T_zoom): # let's first assemble the seq of background tensors # this should effectively CREATE egomotion # i fully expect we can do this all in one shot # note it makes sense to create egomotion here, because # we want to predict each view B, C, Z, Y, X = list(bkg_feat0.shape) B2, C2, Z2, Y2, X2 = list(obj_feat0.shape) assert(B==B2) assert(C==C2) B, S, _, _ = list(origin_T_camRs.shape) # ok, we have everything we need # for each timestep, we want to warp the bkg to this timestep # utils for packing/unpacking along seq dim __p = lambda x: pack_seqdim(x, B) __u = lambda x: unpack_seqdim(x, B) # we in fact have utils for this already cam0s_T_camRs = utils_geom.get_camM_T_camXs(origin_T_camRs, ind=0) camRs_T_cam0s = __u(utils_geom.safe_inverse(__p(cam0s_T_camRs))) bkg_feat0s = bkg_feat0.unsqueeze(1).repeat(1, S, 1, 1, 1, 1) bkg_featRs = apply_4x4s_to_voxs(camRs_T_cam0s, bkg_feat0s) # now for the objects # we want to sample for each location in the bird grid xyz_mems_ = utils_basic.gridcloud3D(B*S, Z, Y, X, norm=False) # this is B*S x Z*Y*X x 3 xyz_camRs_ = Mem2Ref(xyz_mems_, Z, Y, X) camRs_T_zoom_ = __p(camRs_T_zoom) zoom_T_camRs_ = camRs_T_zoom_.inverse() # note this is not 
a rigid transform xyz_zooms_ = utils_geom.apply_4x4(zoom_T_camRs_, xyz_camRs_) # we will do the whole traj at once (per obj) # note we just have one feat for the whole traj, so we tile up obj_feats = obj_feat0.unsqueeze(1).repeat(1, S, 1, 1, 1, 1) obj_feats_ = __p(obj_feats) # this is B*S x Z x Y x X x C # to sample, we need feats_ in ZYX order obj_featRs_ = utils_samp.sample3D(obj_feats_, xyz_zooms_, Z, Y, X) obj_featRs = __u(obj_featRs_) # overweigh objects, so that we essentially overwrite # featRs = 0.05*bkg_featRs
self.thrift_spec))) return oprot.writeStructBegin('TAlterSentryRoleRevokePrivilegeRequest') if self.protocol_version is not None: oprot.writeFieldBegin('protocol_version', TType.I32, 1) oprot.writeI32(self.protocol_version) oprot.writeFieldEnd() if self.requestorUserName is not None: oprot.writeFieldBegin('requestorUserName', TType.STRING, 2) oprot.writeString(self.requestorUserName) oprot.writeFieldEnd() if self.roleName is not None: oprot.writeFieldBegin('roleName', TType.STRING, 3) oprot.writeString(self.roleName) oprot.writeFieldEnd() if self.component is not None: oprot.writeFieldBegin('component', TType.STRING, 4) oprot.writeString(self.component) oprot.writeFieldEnd() if self.privilege is not None: oprot.writeFieldBegin('privilege', TType.STRUCT, 5) self.privilege.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.protocol_version is None: raise TProtocol.TProtocolException(message='Required field protocol_version is unset!') if self.requestorUserName is None: raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!') if self.roleName is None: raise TProtocol.TProtocolException(message='Required field roleName is unset!') if self.component is None: raise TProtocol.TProtocolException(message='Required field component is unset!') if self.privilege is None: raise TProtocol.TProtocolException(message='Required field privilege is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.protocol_version) value = (value * 31) ^ hash(self.requestorUserName) value = (value * 31) ^ hash(self.roleName) value = (value * 31) ^ hash(self.component) value = (value * 31) ^ hash(self.privilege) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def 
__ne__(self, other): return not (self == other) class TAlterSentryRoleRevokePrivilegeResponse(object): """ Attributes: - status """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1 ) def __init__(self, status=None,): self.status = status def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.status = sentry_common_service.ttypes.TSentryResponseStatus() self.status.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('TAlterSentryRoleRevokePrivilegeResponse') if self.status is not None: oprot.writeFieldBegin('status', TType.STRUCT, 1) self.status.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.status is None: raise TProtocol.TProtocolException(message='Required field status is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.status) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) 
class TListSentryRolesRequest(object): """ Attributes: - protocol_version - requestorUserName - groupName - component """ thrift_spec = ( None, # 0 (1, TType.I32, 'protocol_version', None, 2, ), # 1 (2, TType.STRING, 'requestorUserName', None, None, ), # 2 (3, TType.STRING, 'groupName', None, None, ), # 3 (4, TType.STRING, 'component', None, None, ), # 4 ) def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, groupName=None, component=None,): self.protocol_version = protocol_version self.requestorUserName = requestorUserName self.groupName = groupName self.component = component def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.protocol_version = iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.requestorUserName = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.groupName = iprot.readString() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.component = iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('TListSentryRolesRequest') if self.protocol_version is not None: oprot.writeFieldBegin('protocol_version', TType.I32, 1) oprot.writeI32(self.protocol_version) oprot.writeFieldEnd() if self.requestorUserName is not None: 
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2) oprot.writeString(self.requestorUserName) oprot.writeFieldEnd() if self.groupName is not None: oprot.writeFieldBegin('groupName', TType.STRING, 3) oprot.writeString(self.groupName) oprot.writeFieldEnd() if self.component is not None: oprot.writeFieldBegin('component', TType.STRING, 4) oprot.writeString(self.component) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.protocol_version is None: raise TProtocol.TProtocolException(message='Required field protocol_version is unset!') if self.requestorUserName is None: raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!') if self.component is None: raise TProtocol.TProtocolException(message='Required field component is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.protocol_version) value = (value * 31) ^ hash(self.requestorUserName) value = (value * 31) ^ hash(self.groupName) value = (value * 31) ^ hash(self.component) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class TSentryRole(object): """ Attributes: - roleName - groups """ thrift_spec = ( None, # 0 (1, TType.STRING, 'roleName', None, None, ), # 1 (2, TType.SET, 'groups', (TType.STRING,None), None, ), # 2 ) def __init__(self, roleName=None, groups=None,): self.roleName = roleName self.groups = groups def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: 
(fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.roleName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.groups = set() (_etype24, _size21) = iprot.readSetBegin() for _i25 in xrange(_size21): _elem26 = iprot.readString() self.groups.add(_elem26) iprot.readSetEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('TSentryRole') if self.roleName is not None: oprot.writeFieldBegin('roleName', TType.STRING, 1) oprot.writeString(self.roleName) oprot.writeFieldEnd() if self.groups is not None: oprot.writeFieldBegin('groups', TType.SET, 2) oprot.writeSetBegin(TType.STRING, len(self.groups)) for iter27 in self.groups: oprot.writeString(iter27) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.roleName is None: raise TProtocol.TProtocolException(message='Required field roleName is unset!') if self.groups is None: raise TProtocol.TProtocolException(message='Required field groups is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.roleName) value = (value * 31) ^ hash(frozenset(self.groups)) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class TListSentryRolesResponse(object): """ Attributes: - status - roles """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'status', 
(sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1 (2, TType.SET, 'roles', (TType.STRUCT,(TSentryRole, TSentryRole.thrift_spec)), None, ), # 2 ) def __init__(self, status=None, roles=None,): self.status = status self.roles = roles def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.status = sentry_common_service.ttypes.TSentryResponseStatus() self.status.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.roles = set() (_etype31, _size28) = iprot.readSetBegin() for _i32 in xrange(_size28): _elem33 = TSentryRole() _elem33.read(iprot) self.roles.add(_elem33) iprot.readSetEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('TListSentryRolesResponse') if self.status is not None: oprot.writeFieldBegin('status', TType.STRUCT, 1) self.status.write(oprot) oprot.writeFieldEnd() if self.roles is not None: oprot.writeFieldBegin('roles', TType.SET, 2) oprot.writeSetBegin(TType.STRUCT, len(self.roles)) for iter34 in self.roles: iter34.write(oprot) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.status is None: raise TProtocol.TProtocolException(message='Required field status is unset!') return def 
__hash__(self): value = 17 value = (value * 31) ^ hash(self.status) value = (value * 31) ^ hash(self.roles) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class TListSentryPrivilegesRequest(object): """ Attributes: - protocol_version - requestorUserName - roleName - component - serviceName - authorizables """ thrift_spec = ( None, # 0 (1, TType.I32, 'protocol_version', None, 2, ), # 1 (2, TType.STRING, 'requestorUserName', None, None, ), # 2 (3, TType.STRING, 'roleName', None, None, ), # 3 (4, TType.STRING, 'component', None, None, ), # 4 (5, TType.STRING, 'serviceName', None, None, ), # 5 (6, TType.LIST, 'authorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 6 ) def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None,
user = user + user_name # call to get tasks list task_list = fgapisrv_db.get_task_list(user, app_id) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # DBError getting TaskList # Prepare for 402 task_state = 402 task_response = { "message": db_state[1] } else: # Prepare response task_response = {} task_array = [] task_state = 200 for task_id in task_list: task_record = fgapisrv_db.get_task_record(task_id) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # DBError getting TaskRecord # Prepare for 403 task_state = 403 task_array = { "message": db_state[1] } else: task_array += [{ "id": task_record['id'], "application": task_record['application'], "description": task_record['description'], "arguments": task_record['arguments'], "input_files": task_record['input_files'], "output_files": task_record['output_files'], "status": task_record['status'], "user": task_record['user'], "date": str(task_record['creation']), "last_change": str(task_record['last_change']), "_links": [ {"rel": "self", "href": "/%s/tasks/%s" % (fgapiver, task_id) }, {"rel": "input", "href": "/%s/tasks/%s/input" % (fgapiver, task_id) } ]}, ] task_response = {"tasks": task_array} # When page, per_page are not none # (page=0..(len(task_response)/per_page)-1) # if page is not None and per_page is not None: # task_response = task_response[page*per_page:(page+1)*per_page] js = json.dumps(paginate_response( task_response, page, per_page), indent=fgjson_indent) resp = Response(js, status=task_state, mimetype='application/json') resp.headers['Content-type'] = 'application/json' return resp elif request.method == 'POST': print "username %s - %s" % (user_name, user) auth_state, auth_msg = authorize_user( current_user, app_id, user, "app_run") if not auth_state: task_state = 402 task_response = { "message": "Not authorized to perform this request:\n%s" % auth_msg} else: # Getting values params = request.get_json() if params is not None: app_id = params.get('application', '') app_desc = 
params.get('description', '') app_args = params.get('arguments', []) app_inpf = params.get('input_files', []) app_outf = params.get('output_files', []) # Connect database fgapisrv_db = FGAPIServerDB( db_host=fgapisrv_db_host, db_port=fgapisrv_db_port, db_user=fgapisrv_db_user, db_pass=<PASSWORD>_pass, db_name=fgapisrv_db_name, iosandbbox_dir=fgapisrv_iosandbox, geapiserverappid=fgapisrv_geappid) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # Couldn't contact database # Prepare for 404 not found task_state = 404 task_response = { "message": db_state[1] } else: # Create task task_id = fgapisrv_db.init_task( app_id, app_desc, user, app_args, app_inpf, app_outf) if task_id < 0: task_state = fgapisrv_db.get_state() # Error initializing task # Prepare for 410 error task_state = 410 task_response = { 'message': task_state[1] } else: # Prepare response task_state = 200 task_record = fgapisrv_db.get_task_record(task_id) task_response = { "id": task_record['id'], "application": task_record['application'], "description": task_record['description'], "arguments": task_record['arguments'], "input_files": task_record['input_files'], "output_files": task_record['output_files'], "status": task_record['status'], "user": task_record['user'], "date": str( task_record['last_change']), "_links": [ { "rel": "self", "href": "/%s/tasks/%s" % (fgapiver, task_id)}, { "rel": "input", "href": "/%s/tasks/%s/input" % (fgapiver, task_id)}]} else: task_state = 404 task_response = { "message": ("Did not find any application description " "json input")} js = json.dumps(task_response, indent=fgjson_indent) resp = Response(js, status=task_state, mimetype='application/json') resp.headers['Content-type'] = 'application/json' if task_state == 200: resp.headers.add('Location', '/v1.0/tasks/%s' % task_id) resp.headers.add('Link', ('</v1.0/tasks/%s/input>; ' 'rel="input", </v1.0/tasks/%s>; rel="self"') % (task_id, task_id)) return resp # This is an informative call # GET - shows details # POST - 
could reshape the request (Delete/Recreate) @app.route( '/%s/tasks/<task_id>' % fgapiver, methods=[ 'GET', 'POST', 'DELETE', 'PATCH']) @login_required def task_id(task_id=None): user_name = current_user.get_name() user_id = current_user.get_id() app_id = get_task_app_id(task_id) user = request.values.get('user', user_name) if request.method == 'GET': auth_state, auth_msg = authorize_user( current_user, app_id, user, "task_view") if not auth_state: task_state = 402 task_response = { "message": "Not authorized to perform this request:\n%s" % auth_msg} else: fgapisrv_db = FGAPIServerDB( db_host=fgapisrv_db_host, db_port=fgapisrv_db_port, db_user=fgapisrv_db_user, db_pass=<PASSWORD>, db_name=fgapisrv_db_name, iosandbbox_dir=fgapisrv_iosandbox, fgapiserverappid=fgapisrv_geappid) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # Couldn't contact database # Prepare for 404 not found task_status = 404 task_response = { "message": db_state[1] } elif not fgapisrv_db.task_exists(task_id): task_status = 404 task_response = { "message": "Unable to find task with id: %s" % task_id } else: # Get task details task_response = fgapisrv_db.get_task_record(task_id) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # Couldn't get TaskRecord # Prepare for 404 not found task_status = 404 task_response = { "message": db_state[1] } else: task_status = 200 # Display task details js = json.dumps(task_response, indent=fgjson_indent) resp = Response(js, status=task_status, mimetype='application/json') resp.headers['Content-type'] = 'application/json' return resp elif request.method == 'DELETE': auth_state, auth_msg = authorize_user( current_user, app_id, user, "task_delete") if not auth_state: task_state = 402 task_response = { "message": "Not authorized to perform this request:\n%s" % auth_msg} else: fgapisrv_db = FGAPIServerDB( db_host=fgapisrv_db_host, db_port=fgapisrv_db_port, db_user=fgapisrv_db_user, db_pass=<PASSWORD>, db_name=fgapisrv_db_name, 
iosandbbox_dir=fgapisrv_iosandbox, fgapiserverappid=fgapisrv_geappid) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # Couldn't contact database # Prepare for 404 not found task_status = 404 task_response = { "message": db_state[1] } elif not fgapisrv_db.task_exists(task_id): task_status = 404 task_response = { "message": "Unable to find task with id: %s" % task_id } elif not fgapisrv_db.delete(task_id): task_status = 410 task_response = { "message": "Unable to delete task with id: %s" % task_id } else: task_status = 200 task_response = { "message": "Successfully removed task with id: %s" % task_id} js = json.dumps(task_response, indent=fgjson_indent) resp = Response(js, status=task_status, mimetype='application/json') resp.headers['Content-type'] = 'application/json' return resp elif request.method == 'PATCH': # PATCH on tasks accepts status change or on runtime_data new_status = params.get('status', None) if new_status is not None: # status change: auth_state, auth_msg = authorize_user( current_user, app_id, user, "task_statuschange") if not auth_state: task_state = 402 task_response = { "message": "Not authorized to perform status change " "request:\n%s" % auth_msg} else: fgapisrv_db = FGAPIServerDB( db_host=fgapisrv_db_host, db_port=fgapisrv_db_port, db_user=fgapisrv_db_user, db_pass=<PASSWORD>, db_name=fgapisrv_db_name, iosandbbox_dir=fgapisrv_iosandbox, fgapiserverappid=fgapisrv_geappid) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # Couldn't contact database # Prepare for 404 not found task_status = 404 task_response = { "message": db_state[1] } elif not fgapisrv_db.task_exists(task_id): task_status = 404 task_response = { "message": "Unable to find task with id: %s" % task_id } elif not fgapisrv_db.status_change(task_id, new_status): task_status = 410 task_response = { "message": ("Unable to change status for task having " "id: %s" % task_id) } else: task_status = 200 task_response = { "message": "Successfully changed status of task 
with" " id: %s" % task_id } else: # runtime_data: # # The input consists in a json having the form # { "runtime_data" : [ # { "data_name": "name" # ,"data_value": "value" # ,"data_desc": "description of the value" # ,"data_type": "how client receives the file" # ,"data_proto": "protocol used to access data" # }, ... ] } # # The insertion policy will be: # 1) data_name does not exists, a new record will be created in # runtime_data table # 2) data_name exists the new value will be updated to the # existing name # auth_state, auth_msg = authorize_user( current_user, app_id, user, "task_userdata") if not auth_state: task_state = 402 task_response = { "message": "Not authorized to perform this request:\n%s" % auth_msg} else: params = request.get_json() runtime_data = params.get('runtime_data', []) fgapisrv_db = FGAPIServerDB( db_host=fgapisrv_db_host, db_port=fgapisrv_db_port, db_user=fgapisrv_db_user, db_pass=<PASSWORD>, db_name=fgapisrv_db_name, iosandbbox_dir=fgapisrv_iosandbox, fgapiserverappid=fgapisrv_geappid) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # Couldn't contact database # Prepare for 404 not found task_status = 404 task_response = { "message": db_state[1] } elif not fgapisrv_db.task_exists(task_id): task_status = 404 task_response = { "message": "Unable to find task with id: %s" % task_id } elif not fgapisrv_db.patch_task(task_id, runtime_data): task_status = 410 task_response = { "message": ("Unable store runtime data for task " "having id: %s" % task_id) } else: task_status = 200 task_response = { "message": "Successfully patched task with id: %s" % task_id} js = json.dumps(task_response, indent=fgjson_indent) resp = Response(js, status=task_status, mimetype='application/json') resp.headers['Content-type'] = 'application/json' return resp elif request.method == 'POST': task_response = { "message": "Not supported method" } js = json.dumps(task_response, indent=fgjson_indent) resp = Response(js, status=404, mimetype='application/json') 
resp.headers['Content-type'] = 'application/json' return resp # This finalizes the task request allowing to submit the task # GET - shows input files # POST - specify input files @app.route('/%s/tasks/<task_id>/input' % fgapiver, methods=['GET', 'POST']) @login_required def task_id_input(task_id=None): user_name = current_user.get_name() user_id = current_user.get_id() app_id = get_task_app_id(task_id) user = request.values.get('user', user_name) task_status = 404 if request.method == 'GET': auth_state, auth_msg = authorize_user( current_user, app_id, user, "task_view") if not auth_state: task_state = 402 task_response = { "message": "Not authorized to perform this request:\n%s" % auth_msg} else: # Display task_input_file details fgapisrv_db = FGAPIServerDB( db_host=fgapisrv_db_host, db_port=fgapisrv_db_port, db_user=fgapisrv_db_user, db_pass=<PASSWORD>, db_name=fgapisrv_db_name, iosandbbox_dir=fgapisrv_iosandbox, geapiserverappid=fgapisrv_geappid) db_state = fgapisrv_db.get_state() if db_state[0] != 0: # Couldn't contact database # Prepare for 404 not found task_status = 404 task_response = { "message": db_state[1] } elif not fgapisrv_db.task_exists(task_id): task_status = 404 task_response = { "message": "Unable to find task with id: %s" %
<reponame>camponogaraviera/qutip<gh_stars>1000+
import os
import numpy as np
from qutip.interpolate import Cubic_Spline

# Absolute path of this package directory; backslashes normalized so the
# generated Cython include string is valid on Windows as well.
_cython_path = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")
_include_string = "'"+_cython_path+"/complex_math.pxi'"

__all__ = ['Codegen']


class Codegen():
    """
    Class for generating cython code files at runtime.

    Collects the time-dependent pieces of a Hamiltonian and of collapse
    operators and emits a ``rhs.pyx`` file containing the ODE right-hand-side
    function (and, for string-based collapse terms, helper eval functions).
    """
    def __init__(self, h_terms=None, h_tdterms=None, h_td_inds=None, args=None,
                 c_terms=None, c_tdterms=[], c_td_inds=None, c_td_splines=[],
                 c_td_spline_flags=[], type='me', config=None,
                 use_openmp=False, omp_components=None, omp_threads=None):
        # NOTE(review): ``c_tdterms``, ``c_td_splines`` and
        # ``c_td_spline_flags`` are mutable default arguments; this is only
        # safe if no caller ever mutates the defaults -- confirm before
        # refactoring.  ``type`` also shadows the builtin.
        import sys
        import os
        # Make modules generated into the current working directory
        # importable later on.
        sys.path.append(os.getcwd())

        # Hamiltonian time-dependent pieces
        self.type = type                # evolution type ('mc' branches below)
        if isinstance(h_terms, int):
            # Accept a bare count and expand it to indices 0..n-1.
            h_terms = range(h_terms)
        self.h_terms = h_terms          # number of H pieces
        self.h_tdterms = h_tdterms      # list of time-dependent strings
        self.h_td_inds = h_td_inds      # indices of time-dependent terms
        self.args = args                # args for strings

        # Collapse operator time-dependent pieces
        self.c_terms = c_terms          # number of C pieces
        self.c_tdterms = c_tdterms      # list of time-dependent strings
        self.c_td_inds = c_td_inds      # indices of time-dependent terms
        self.c_td_splines = c_td_splines            # list of c_op spline arrays
        self.c_td_spline_flags = c_td_spline_flags  # flags for oper or super

        # Code generator properties
        self.code = []                  # strings to be written to file
        self.level = 0                  # indent level
        self.config = config

        # openmp settings
        self.use_openmp = use_openmp
        self.omp_components = omp_components
        self.omp_threads = omp_threads

    def write(self, string):
        """write lines of code to self.code"""
        # Indentation is applied once here, driven by the current level.
        self.code.append(" " * self.level + string + "\n")

    def file(self, filename):
        """open file called filename for writing"""
        # NOTE(review): this rebinds ``self.file`` from the bound method to
        # the open file handle, so it can only be called once per instance.
        # ``generate`` relies on exactly that one-shot behavior.
        self.file = open(filename, "w")

    def generate(self, filename="rhs.pyx"):
        """generate the file"""
        # Module-level preamble (imports, externs) for the generated .pyx.
        for line in cython_preamble(self.use_openmp):
            self.write(line)

        # write function for Hamiltonian terms (there is always at least one
        # term)
        for line in cython_checks() + self.ODE_func_header():
            self.write(line)
        self.indent()
        for line in self.func_vars():
            self.write(line)
        self.write(self.func_end())
        self.dedent()

        # generate collapse operator functions if any c_terms
        if any(self.c_tdterms):
            # col_spmv: applies the time-dependent coefficient to a product.
            for line in (cython_checks() + self.col_spmv_header() +
                         cython_col_spmv()):
                self.write(line)
            self.indent()
            for line in self.func_which():
                self.write(line)
            self.write(self.func_end())
            self.dedent()
            # col_expect: expectation-value variant of the above.
            for line in (cython_checks() + self.col_expect_header() +
                         cython_col_expect(self.args)):
                self.write(line)
            self.indent()
            for line in self.func_which_expect():
                self.write(line)
            self.write(self.func_end_real())
            self.dedent()

        # Flush the accumulated code to disk (note: self.file becomes the
        # file handle after the first call, see ``file`` above).
        self.file(filename)
        self.file.writelines(self.code)
        self.file.close()
        self.config.cgen_num += 1

    def indent(self):
        """increase indention level by one"""
        self.level += 1

    def dedent(self):
        """decrease indention level by one"""
        if self.level == 0:
            # Dedenting below column zero means the emitted code is malformed.
            raise SyntaxError("Error in code generator")
        self.level -= 1

    def _get_arg_str(self, args):
        """Render extra user ``args`` as typed Cython parameters.

        NOTE(review): the emptiness test uses the ``args`` parameter but the
        loop iterates ``self.args`` -- presumably callers always pass
        ``self.args``; verify before changing either.
        """
        if len(args) == 0:
            return ''

        ret = ''
        for name, value in self.args.items():
            if isinstance(value, np.ndarray):
                # Arrays become typed 1-d ndarray parameters.
                ret += ",\n np.ndarray[np.%s_t, ndim=1] %s" % \
                    (value.dtype.name, name)
            else:
                # Scalars are mapped onto the closest C-level type.
                if isinstance(value, (int, np.int32, np.int64)):
                    kind = 'int'
                elif isinstance(value, (float, np.float32, np.float64)):
                    kind = 'float'
                elif isinstance(value, (complex, np.complex128)):
                    kind = 'complex'
                #kind = type(value).__name__
                ret += ",\n " + kind + " " + name
        return ret

    def ODE_func_header(self):
        """Creates function header for time-dependent ODE RHS."""
        func_name = "def cy_td_ode_rhs("
        # strings for time and vector variables
        input_vars = ("\n double t" +
                      ",\n complex[::1] vec")
        # One CSR triple (data/idx/ptr) per Hamiltonian term.
        for k in self.h_terms:
            input_vars += (",\n " +
                           "complex[::1] data%d," % k +
                           "int[::1] idx%d," % k +
                           "int[::1] ptr%d" % k)
        # Spline-modulated collapse operators get CSR triples numbered
        # after the Hamiltonian terms.
        kk = len(self.h_tdterms)
        for jj in range(len(self.c_td_splines)):
            input_vars += (",\n " +
                           "complex[::1] data%d," % (jj+kk) +
                           "int[::1] idx%d," % (jj+kk) +
                           "int[::1] ptr%d" % (jj+kk))
        # String-based collapse terms get CSR triples after those.
        if any(self.c_tdterms):
            for k in range(len(self.h_terms),
                           len(self.h_terms) + len(self.c_tdterms)):
                input_vars += (",\n " +
                               "complex[::1] data%d," % k +
                               "int[::1] idx%d," % k +
                               "int[::1] ptr%d" % k)
        # Add array for each Cubic_Spline term
        spline = 0
        for htd in (self.h_tdterms+self.c_td_splines):
            if isinstance(htd, Cubic_Spline):
                if not htd.is_complex:
                    input_vars += (",\n " +
                                   "double[::1] spline%d" % spline)
                else:
                    input_vars += (",\n " +
                                   "complex[::1] spline%d" % spline)
                spline += 1
        input_vars += self._get_arg_str(self.args)
        func_end = "):"
        return [func_name + input_vars + func_end]

    def col_spmv_header(self):
        """
        Creates function header for time-dependent collapse operator terms.
        """
        func_name = "def col_spmv("
        input_vars = ("int which, double t, complex[::1] " +
                      "data, int[::1] idx, int[::1] " +
                      "ptr, complex[::1] vec")
        input_vars += self._get_arg_str(self.args)
        func_end = "):"
        return [func_name + input_vars + func_end]

    def col_expect_header(self):
        """
        Creates function header for time-dependent collapse expectation
        values.
        """
        func_name = "def col_expect("
        input_vars = ("int which, double t, complex[::1] " +
                      "data, int[::1] idx, int[::1] " +
                      "ptr, complex[::1] vec")
        input_vars += self._get_arg_str(self.args)
        func_end = "):"
        return [func_name + input_vars + func_end]

    def func_vars(self):
        """Writes the variables and their types & spmv parts"""
        func_vars = ["", 'cdef size_t row',
                     'cdef unsigned int num_rows = vec.shape[0]',
                     "cdef double complex * " +
                     'out = <complex *>PyDataMem_NEW_ZEROED(num_rows,sizeof(complex))']
        func_vars.append(" ")
        tdterms = self.h_tdterms
        hinds = 0
        spline = 0
        for ht in self.h_terms:
            hstr = str(ht)  # NOTE(review): assigned but never used below
            # Monte-carlo evolution
            if self.type == 'mc':
                if ht in self.h_td_inds:
                    # NOTE(review): ``td_str`` stays unbound (NameError) if
                    # the term is neither a str nor a Cubic_Spline -- confirm
                    # callers guarantee this.
                    if isinstance(tdterms[hinds], str):
                        td_str = tdterms[hinds]
                    elif isinstance(tdterms[hinds], Cubic_Spline):
                        S = tdterms[hinds]
                        if not S.is_complex:
                            td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, spline)
                        else:
                            td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, spline)
                        spline += 1
                    hinds += 1
                else:
                    # Constant term: unit coefficient.
                    td_str = "1.0"
                str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, &out[0], num_rows)" % (
                    ht, ht, ht, td_str)
                func_vars.append(str_out)
            # Master and Schrodinger evolution
            else:
                if self.h_tdterms[ht] == "1.0":
                    # Constant term; optionally emitted as an OpenMP call.
                    if self.use_openmp and self.omp_components[ht]:
                        str_out = "spmvpy_openmp(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], 1.0, out, num_rows, %s)" % (
                            ht, ht, ht, self.omp_threads)
                    else:
                        str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], 1.0, out, num_rows)" % (
                            ht, ht, ht)
                else:
                    if isinstance(self.h_tdterms[ht], str):
                        # Coefficient is a literal expression string pasted
                        # into the generated code.
                        if self.use_openmp and self.omp_components[ht]:
                            str_out = "spmvpy_openmp(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows, %s)" % (
                                ht, ht, ht, self.h_tdterms[ht], self.omp_threads)
                        else:
                            str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows)" % (
                                ht, ht, ht, self.h_tdterms[ht])
                    elif isinstance(self.h_tdterms[ht], Cubic_Spline):
                        # Coefficient is sampled from a spline at runtime.
                        S = self.h_tdterms[ht]
                        if not S.is_complex:
                            interp_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, spline)
                        else:
                            interp_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, spline)
                        spline += 1
                        if self.use_openmp and self.omp_components[ht]:
                            str_out = "spmvpy_openmp(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows, %s)" % (
                                ht, ht, ht, interp_str, self.omp_threads)
                        else:
                            str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows)" % (
                                ht, ht, ht, interp_str)
                    #Do nothing if not a specified type
                    else:
                        str_out = ''
                func_vars.append(str_out)
        cstr = 0
        if len(self.c_tdterms) > 0:
            # add a spacer line between Hamiltonian components and collapse
            # components.
            func_vars.append(" ")
            terms = len(self.c_tdterms)
            tdterms = self.c_tdterms
            cinds = 0
            for ct in range(terms):
                # Collapse coefficients enter squared: (coeff)**2.
                cstr = str(ct + hinds + 1)
                str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows)" % (
                    cstr, cstr, cstr, " (" + tdterms[ct] + ")**2")
                cinds += 1
                func_vars.append(str_out)
        #Collapse operators have cubic spline td-coeffs
        if len(self.c_td_splines) > 0:
            func_vars.append(" ")
            for ct in range(len(self.c_td_splines)):
                S = self.c_td_splines[ct]
                c_idx = self.c_td_spline_flags[ct]
                if not S.is_complex:
                    interp_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, spline)
                else:
                    interp_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, spline)
                spline += 1
                #check if need to wrap string with ()**2
                if c_idx > 0:
                    interp_str = "("+interp_str+")**2"
                c_idx = abs(c_idx)
                # NOTE(review): ``self.omp_components[ht]`` reuses ``ht``
                # left over from the Hamiltonian loop above -- looks like a
                # stale index; confirm intended behavior.
                if self.use_openmp and self.omp_components[ht]:
                    str_out = "spmvpy_openmp(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows, %s)" % (
                        c_idx, c_idx, c_idx, interp_str, self.omp_threads)
                else:
                    str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows)" % (
                        c_idx, c_idx, c_idx, interp_str)
                func_vars.append(str_out)
        return func_vars

    def func_which(self):
        """Writes 'else-if' statements for collapse operator eval function"""
        # NOTE(review): ``ctd`` is computed once from c_tdterms[0] although
        # ``ind`` is incremented per iteration and never reused -- confirm
        # whether per-term coefficients were intended here.
        out_string = []
        ind = 0
        out_string.append("cdef size_t kk")
        out_string.append("cdef complex ctd = %s" % self.c_tdterms[ind])
        for k in self.c_td_inds:
            out_string.append("if which == " + str(k) + ":")
            # NOTE(review): the indentation inside this generated-code
            # template was mangled in this copy of the file -- verify the
            # emitted .pyx indents correctly.
            out_string.append("""\
for kk in range(num_rows):
    out[kk] *= ctd
""")
            ind += 1
        return out_string

    def func_which_expect(self):
        """Writes 'else-if' statements for collapse expect function"""
        out_string = []
        ind = 0
        for k in self.c_td_inds:
            out_string.append("if which == " + str(k) + ":")
            out_string.append(" out *= conj(" + self.c_tdterms[ind] + ")")
            ind += 1
        return out_string

    def func_end(self):
        # Wrap the C buffer ``out`` in a NumPy array that owns the memory.
        return """\
cdef np.npy_intp dims = num_rows
cdef np.ndarray[complex, ndim=1, mode='c'] arr_out = np.PyArray_SimpleNewFromData(1, &dims, np.NPY_COMPLEX128, out)
PyArray_ENABLEFLAGS(arr_out, np.NPY_ARRAY_OWNDATA)
return arr_out
"""

    def func_end_real(self):
        # Expectation values of Hermitian operators are real.
        return "return real(out)"


def cython_preamble(use_openmp=False):
    """
    Returns list of code segments for Cython preamble.
    """
    if use_openmp:
        openmp_string = 'from qutip.cy.openmp.parfuncs cimport spmvpy_openmp'
    else:
        openmp_string = ''

    return ["""#!python
#cython: language_level=3
# This file is generated automatically by QuTiP.
# (C) 2011 and later, QuSTaR
import numpy as np
cimport numpy as np
cimport cython
np.import_array()
cdef extern from "numpy/arrayobject.h" nogil:
    void PyDataMem_NEW_ZEROED(size_t size, size_t elsize)
    void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
"""
            + openmp_string +
            """
from qutip.cy.spmatfuncs cimport spmvpy
from qutip.cy.interpolate
from datanator.schema_2 import transform from datanator_query_python.config import config import unittest import numpy as np class TestTransform(unittest.TestCase): @classmethod def setUpClass(cls): conf = config.SchemaMigration() cls.des_col = "transformation-test" cls.src = transform.Transform(MongoDB=conf.SERVER, db="test", des_col=cls.des_col, username=conf.USERNAME, password=<PASSWORD>, max_entries=20, verbose=True) @classmethod def tearDownClass(cls): pass # cls.src.db_obj.drop_collection(cls.des_col) # @unittest.skip("for now") def test_parse_docs(self): self.src.process_docs("rna_modification", db="datanator") @unittest.skip("passed") def test_build_uniprot_entity(self): obj = { "uniprot_id": "Q75IW1", "add_id": [ { "name_space": "gene_name_alt", "value": None }, { "name_space": "gene_name_orf", "value": "OsJ_11271 OSJNBb0059G13.19" }, { "name_space": "gene_name_oln", "value": "Os03g0416300 LOC_Os03g30260" } ], "ancestor_name": [ "cellular organisms", "Eukaryota", "Viridiplantae", "Streptophyta", "Streptophytina", "Embryophyta", "Tracheophyta", "Euphyllophyta", "Spermatophyta", "Magnoliophyta", "Mesangiospermae", "Liliopsida", "Petrosaviidae", "commelinids", "Poales", "Poaceae", "BOP clade", "Oryzoideae", "Oryzeae", "Oryzinae", "Oryza", "Oryza sativa" ], "ancestor_taxon_id": [ 131567, 2759, 33090, 35493, 131221, 3193, 58023, 78536, 58024, 3398, 1437183, 4447, 1437197, 4734, 38820, 4479, 359160, 147367, 147380, 1648021, 4527, 4530 ], "canon_anc_ids": [ 131567, 2759, 33090, 35493, 4447, 38820, 4479, 4527, 4530 ], "canon_anc_names": [ "cellular organisms", "Eukaryota", "Viridiplantae", "Streptophyta", "Liliopsida", "Poales", "Poaceae", "Oryza", "Oryza sativa" ], "canonical_sequence": 
"MARFLLGAAAIALLAGVSSLLLMVPFAEAYDPLDPNGNITIKWDITQWTPDGYVAVVTIYNFQKYRHIQAPGWSLGWAWAKKEIIWSMAGGQATEQGDCSAFKANIPHCCKRDPRVVDLVPGAPYNMQFGNCCKGGVLTSWVQDPLNAVASFQITVGHSGTSNKTVKAPKNFTLKAPGPGYSCGLAQEVKPPTRFISLDGRRTTQAHVTWNVTCTYSQFVAQRAPTCCVSLSSFYNETIVNCPKCACGCQNKKPGSCVEGNSPYLASVVNGPGKGSLTPLVQCTPHMCPIRVHWHVKLNYRDYWRVKVTITNWNYRMNYSQWNLVVQHPNFENVSTVFSFNYKSLNPYGVINDTAMMWGVKYYNDLLMVAGPDGNVQSELLFRKDRSTFTFDKGWAFPRRIYFNGESCVMPSPDLYPWLPPSSTPRFRTVFLLMSFLVCGTLAFLHNHLVLDKNCGKC", "ec_number": None, "entrez_id": "4333115", "entry_name": "COBL2_ORYSJ", "gene_name": "BC1L2", "ko_name": [ None ], "ko_number": None, "length": 458, "mass": "51107", "ncbi_taxonomy_id": 39947, "protein_name": "COBRA-like protein 2 (Protein BRITTLE CULM1-like 2)", "schema_version": "2", "species_name": "Oryza sativa Japonica Group", "status": "reviewed", "abundances": [ { "organ": "WHOLE_ORGANISM", "abundance": "1447" }, { "organ": "WHOLE_ORGANISM", "abundance": "119" }, { "organ": "WHOLE_ORGANISM", "abundance": "1219" }, { "organ": "WHOLE_ORGANISM", "abundance": "2443" }, { "organ": "WHOLE_ORGANISM", "abundance": "1984" }, { "organ": "WHOLE_ORGANISM", "abundance": "2883" }, { "organ": "WHOLE_ORGANISM", "abundance": "2984" }, { "organ": "WHOLE_ORGANISM", "abundance": "2595" }, { "organ": "WHOLE_ORGANISM", "abundance": "389" }, { "organ": "WHOLE_ORGANISM", "abundance": "2373" }, { "organ": "WHOLE_ORGANISM", "abundance": "3052" }, { "organ": "WHOLE_ORGANISM", "abundance": "2763" }, { "organ": "WHOLE_ORGANISM", "abundance": "1992" }, { "organ": "WHOLE_ORGANISM", "abundance": "1321" }, { "organ": "WHOLE_ORGANISM", "abundance": "8918" }, { "organ": "WHOLE_ORGANISM", "abundance": "1730" }, { "organ": "WHOLE_ORGANISM", "abundance": "1463" }, { "organ": "WHOLE_ORGANISM", "abundance": "1730" } ], "sabio_kinlaw_id": [ 4573, 4574, 4575, 4576 ], "modifications": [ { "pro_id": "PR:000024921", "uniprot_id": "P0AFG8-1", "processing": "2-887", "deletions": np.NaN, "processsed_sequence_iubmb": 
"SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIRSAIRWNAIMTVLRASKKDLELGGHMASFQSSATIYDVCFNHFFRARNEQDGGDLVYFQGHISPGVYARAFLEGRLTQEQLDNFRQEVHGNGLSSYPHPKLMPEFWQFPTVSMGLGPIGAIYQAKFL<KEY>", "processsed_formula": "C4436H6965N1217O1216S27", "processsed_molecular_weight": 97668.439, "processsed_charge": 98, "modifications": "K --> MOD:00064 (716)", "crosslinks": np.NaN, "modified_sequence_abbreviated_bpforms": "SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIR<KEY>AA<KEY>", "modified_sequence_bpforms": "SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIRSAIRWNAIMTVLRASKKDLELGGHMASFQSSATIYDVCFNHFF<KEY>AA<KEY>", "concrete": True, "modified_formula": "C4438H6966N1217O1217S27", "modified_molecular_weight": 97709.46800000001, "modified_charge": 97, "modifications_formula": "C2HO", "modifications_molecular_weight": 41.028999999994994, "modifications_charge": -1, "pro_issues": np.NaN, "monomeric_form_issues": np.NaN, "reference": { "doi": "10.1093/nar/gkw1075" } }, { "pro_id": "PR:000036675", "uniprot_id": "P0AFG8-1", "processing": "2-887", "deletions": np.NaN, "processsed_sequence_iubmb": 
"SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIRSAIRWNAIMTVLRASKKDLELGGHMASFQSSATIYDVCFNHFFRARNEQDGGDLVYFQGHISPGVYARAFLEGRLTQEQLDNFRQEVHGNGLSSYPHPKLMPEFWQFPTVSMGLGPIGAIYQAKFLKYLEHRGLKDTSKQTVYAFLGDGEMDEPESKGAITIATREKLDNLVFVINCNLQRLDGPVTGNGKIINELEGIFEGAGWNVIKVMWGSRWDELLRKDTSGKLIQLMNETVDGDYQTFKSKDGAYVREHFFGKYPETAALVADWTDEQIWALNRGGHDPKKIYAAFKKAQETKGKATVILAHTIKGYGMGDAAEGKNIAHQVKKMNMDGVRHIRDRFNVPVSDADIEKLPYITFPEGSEEHTYLHAQRQKLHGYLPSRQPNFTEKLELPSLQDFGALLEEQSKEISTTIAFVRALNVMLKNKSIKDRLVPIIADEARTFGMEGLFRQIGIYSPNGQQYTPQDREQVAYYKEDEKGQILQEGINELGAGCSWLAAATSYSTNNLPMIPFYIYYSMFGFQRIGDLCWAAGDQQARGFLIGGTSGRTTLNGEGLQHEDGHSHIQSLTIPNCISYDPAYAYEVAVIMHDGLERMYGEKQENVYYYITTLNENYHMPAMPEGAEEGIRKGIYKLETIEGSKGKVQLLGSGSILRHVREAAEILAKDYGVGSDVYSVTSFTELARDGQDCERWNMLHPLETPRVPYIAQVMNDAPAVASTDYMKLFAEQVRTYVPADDYRVLGTDGFGRSDSRENLRHHFEVDASYVVVAALGELAKRGEIDKKVVADAIAKFNIDADKVNPRLA", "processsed_formula": "C4436H6965N1217O1216S27", "processsed_molecular_weight": 97668.439, "processsed_charge": 98, "modifications": "K --> MOD:00064 (716)", "crosslinks": np.NaN, "modified_sequence_abbreviated_bpforms": "SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIRSAIRWNAIMTVLRASKKDLELGGHMASFQSSATIYDVCFNHFFRARNEQDGGDLVYFQGHISPGVYARAFLEGRLTQEQLDNFRQEVHGNGLSSYPHPKLMPEFWQFPTVSMGLGPIGAIYQAKFLKYLEHRGLKDTSKQTVYAFLGDGEMDEPESKGAITIATREKLDNLVFVINCNLQRLDGPVTGNGKIINELEGIFEGAGWNVIKVMWGSRWDELLRKDTSGKLIQLMNETVDGDYQTFKSKDGAYVREHFFGKYPETAALVADWTDEQIWALNRGGHDPKKIYAAFKKAQETKGKATVILAHTIKGYGMGDAAEGKNIAHQVKKMNMDGVRHIRDRFNVPVSDADIEKLPYITFPEGSEEHTYLHAQRQKLHGYLPSRQPNFTEKLELPSLQDFGALLEEQSKEISTTIAFVRALNVMLKNKSIKDRLVPIIADEARTFGMEGLFRQIGIYSPNGQQYTPQDREQVAYYKEDEKGQILQEGINELGAGCSWLAAATSY<KEY>SMFGFQRIGDLCWAAGDQQARGFLIGGTSGRTTLNGEGLQHEDGHSHIQSLTIPNCISYDPAYAYEVAVIMHDGLERMYGEKQENVYYYITTLNENYHMPAMPEGAEEGIRKGIY{AA<KEY>", "modified_sequence_bpforms": 
"SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIRSAIRWNAIMTVLRASKKDLELGGHMASFQSSATIYDVCFNHFFRARNEQDGGDLVYFQGHISPGVYARAFLEGRLTQEQLDNFRQEVHGNGLSSYPHPKLMPEFWQFPTVSMGLGPIGAIYQAKFLKYLEHRGLKDTSKQTVYAFLGDGEMDEPESKGAITIATREKLDNLVFVINCNLQRLDGPVTGNGKIINELEGIFEGAGWNVIKVMWGSRWDELLRKDTSGKLIQLMNETVDGDYQTFKSKDGAYVREHFFGKYPETAALVADWTDEQIWALNRGGHDPKKIYAAFKKAQETKGKATVILAHTIKGYGMGDAAEGKNIAHQVKKMNMDGVRHIRDRFNVPVSDADIEKLPYITFPEGSEEHTYLHAQRQKLHGYLPSRQPNFTEKLELPSLQDFGALLEEQSKEISTTIAFVRALNVMLKNKSIKDRLVPIIADEARTFGMEGLFRQIGIYSPNGQQYTPQDREQVAYYKEDEKGQILQEGINELGAGCSWLAAATSYSTNNLPMIPFYIYYSMFGFQRIGDLCWAAGDQQARGFLIGGTSGRTTLNGEGLQHEDGHSHIQSLTIPNCISYDPAYAYEVAVIMHDGLERMYGEKQENVYYYITTLNENYHMPAMPEGAEEGIRKGIY{AA0055}LETIEGSKGKVQLLGSGSILRHVREAAEILAKDYGVGSDVYSVTSFTELARDGQDCERWNMLHPLETPRVPYIAQVMNDAPAVASTDYMKLFAEQVRTYVPADDYRVLGTDGFGRSDSRENLRHHFEVDASYVVVAALGELAKRGEIDKKVVADAIAKFNIDADKVNPRLA", "concrete": True, "modified_formula": "C4438H6966N1217O1217S27", "modified_molecular_weight": 97709.46800000001, "modified_charge": 97, "modifications_formula": "C2HO", "modifications_molecular_weight": 41.028999999994994, "modifications_charge": -1, "pro_issues": np.NaN, "monomeric_form_issues": np.NaN, "reference": { "doi": "10.1093/nar/gkw1075" } } ] } result = self.src.build_uniprot_entity(obj) @unittest.skip("passed") def test_build_uniprot_obs(self): obj = { "uniprot_id": "Q75IW1", "add_id": [ { "name_space": "gene_name_alt", "value": None }, { "name_space": "gene_name_orf", "value": "OsJ_11271 OSJNBb0059G13.19" }, { "name_space": "gene_name_oln", "value": "Os03g0416300 LOC_Os03g30260" } ], "ancestor_name": [ "cellular organisms", "Eukaryota", "Viridiplantae", "Streptophyta", "Streptophytina", "Embryophyta", "Tracheophyta", "Euphyllophyta", "Spermatophyta", "Magnoliophyta", "Mesangiospermae", "Liliopsida", "Petrosaviidae", "commelinids", "Poales", "Poaceae", "BOP clade", "Oryzoideae", "Oryzeae", "Oryzinae", "Oryza", "Oryza sativa" ], "ancestor_taxon_id": [ 131567, 2759, 33090, 
35493, 131221, 3193, 58023, 78536, 58024, 3398, 1437183, 4447, 1437197, 4734, 38820, 4479, 359160, 147367, 147380, 1648021, 4527, 4530 ], "canon_anc_ids": [ 131567, 2759, 33090, 35493, 4447, 38820, 4479, 4527, 4530 ], "canon_anc_names": [ "cellular organisms", "Eukaryota", "Viridiplantae", "Streptophyta", "Liliopsida", "Poales", "Poaceae", "Oryza", "Oryza sativa" ], "canonical_sequence": "MARFLLGAAAIALLAGVSSLLLMVPFAEAYDPLDPNGNITIKWDITQWTPDGYVAVVTIYNFQKYRHIQAPGWSLGWAWAKKEIIWSMAGGQATEQGDCSAFKANIPHCCKRDPRVVDLVPGAPYNMQFGNCCKGGVLTSWVQDPLNAVASFQITVGHSGTSNKTVKAPKNFTLKAPGPGYSCGLAQEVKPPTRFISLDGRRTTQAHVTWNVTCTYSQFVAQRAPTCCVSLSSFYNETIVNCPKCACGCQNKKPGSCVEGNSPYLASVVNGPGKGSLTPLVQCTPHMCPIRVHWHVKLNYRDYWRVKVTITNWNYRMNYSQWNLVVQHPNFENVSTVFSFNYKSLNPYGVINDTAMMWGVKYYNDLLMVAGPDGNVQSELLFRKDRSTFTFDKGWAFPRRIYFNGESCVMPSPDLYPWLPPSSTPRFRTVFLLMSFLVCGTLAFLHNHLVLDKNCGKC", "ec_number": None, "entrez_id": "4333115", "entry_name": "COBL2_ORYSJ", "gene_name": "BC1L2", "ko_name": [ None ], "ko_number": None, "length": 458, "mass": "51107", "ncbi_taxonomy_id": 39947, "protein_name": "COBRA-like protein 2 (Protein BRITTLE CULM1-like 2)", "schema_version": "2", "species_name": "Oryza sativa Japonica Group", "status": "reviewed", "abundances": [ { "organ": "WHOLE_ORGANISM", "abundance": "1447" }, { "organ": "WHOLE_ORGANISM", "abundance": "119" }, { "organ": "WHOLE_ORGANISM", "abundance": "1219" }, { "organ": "WHOLE_ORGANISM", "abundance": "2443" }, { "organ": "WHOLE_ORGANISM", "abundance": "1984" }, { "organ": "WHOLE_ORGANISM", "abundance": "2883" }, { "organ": "WHOLE_ORGANISM", "abundance": "2984" }, { "organ": "WHOLE_ORGANISM", "abundance": "2595" }, { "organ": "WHOLE_ORGANISM", "abundance": "389" }, { "organ": "WHOLE_ORGANISM", "abundance": "2373" }, { "organ": "WHOLE_ORGANISM", "abundance": "3052" }, { "organ": "WHOLE_ORGANISM", "abundance": "2763" }, { "organ": "WHOLE_ORGANISM", "abundance": "1992" }, { "organ": "WHOLE_ORGANISM", "abundance": "1321" }, { "organ": "WHOLE_ORGANISM", "abundance": "8918" 
}, { "organ": "WHOLE_ORGANISM", "abundance": "1730" }, { "organ": "WHOLE_ORGANISM", "abundance": "1463" }, { "organ": "WHOLE_ORGANISM", "abundance": "1730" } ], "sabio_kinlaw_id": [ 4573, 4574, 4575, 4576 ], "modifications": [ { "pro_id": "PR:000024921", "uniprot_id": "P0AFG8-1", "processing": "2-887", "deletions": np.NaN, "processsed_sequence_iubmb": "SER<KEY>EQVRTYVPADDYRVLGTDGFGRSDSRENLRHHFEVDASYVVVAALGELAKRGEIDKKVVADAIAKFNIDADKVNPRLA", "processsed_formula": "C4436H6965N1217O1216S27", "processsed_molecular_weight": 97668.439, "processsed_charge": 98, "modifications": "K --> MOD:00064 (716)", "crosslinks": np.NaN, "modified_sequence_abbreviated_bpforms": "SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIRSAIRWNAIMTVLRASKKDLELGGHMASFQSSATIYDVCFNHFFRARNEQDGGDLVYFQGHISPGVYARAFLEGRLTQEQLDNFRQEVHGNGLSSYPHPKLMPEFWQFPTVSMGLGPIGAIYQAKFLKYLEHRGLKDTSKQTVYAFLGDGEMDEPESKGAITIATREKLDNLVFVINCNLQRLDGPVTGNGKIINELEGIFEGAGWNVIKVMWGSRWDELLRKDTSGKLIQLMNETVDGDYQTFKSKDGAYVREHFFGKYPETAALVADWTDEQIWALNRGGHDPKKIYAAFKKAQETKGKATVILAHTIKGYGMGDAAEGKNIAHQVKKMNMDGVRHIRDRFNVPVSDADIEKLPYITFPEGSEEHTYLHAQRQKLHGYLPSRQPNFTEKLELPSLQDFGALLEEQSKEISTTIAFVRALNVMLKNKSIKDRLVPIIADEARTFGMEGLFRQIGIYSPNGQQYTPQDREQVAYYKEDEKGQILQEGINELGAGCSWLAAATSYSTNNLPMIPFYIYYSMFGFQRIGDLCWAAGDQQARGFLIGGTSGRTTLNGEGLQHEDGHSHIQSLTIPNCISYDPAYAYEVAVIMHDGLERMYGEKQENVYYYITTLNENYHMPAMPEGAEEGIRKGIY{AA0055}<KEY>ASYVVVAALGELAK<KEY>AKFNIDADKVNPRLA", "modified_sequence_bpforms": "SER<KEY>{AA0055}LETIEGSKGKVQLLGSGSILRHVREAAEILAKDYGVGSDVYSVTSFTELARDGQDCERWNMLHPLETPRVPYIAQVMNDAPAVASTDYMKLFAEQVRTYVPADDYRVLGTDGFGRSDSRENLRHHFEVDASYVVVAALGELAKRGEIDKKVVADAIAKFNIDADKVNPRLA", "concrete": True, "modified_formula": "C4438H6966N1217O1217S27", "modified_molecular_weight": 97709.46800000001, "modified_charge": 97, "modifications_formula": "C2HO", "modifications_molecular_weight": 41.028999999994994, "modifications_charge": -1, "pro_issues": np.NaN, "monomeric_form_issues": np.NaN, "reference": { "doi": "10.1093/nar/gkw1075" 
} }, { "pro_id": "PR:000036675", "uniprot_id": "P0AFG8-1", "processing": "2-887", "deletions": np.NaN, "processsed_sequence_iubmb": "SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIRSAIRWNAIMTVLRASKKDLELGGHMASFQSSATIYDVCFNHFFRARNEQDGGDLVYFQGHISPGVYARAFLEGRLTQEQLDNFRQEVHGNGLSSYPHPKLMPEFWQFPTVSMGLGPIGAIYQAKFLKYLEHRGLKDTSKQTVYAFLGDGEMDEPESKGAITIATREKLDNLVFVINCNLQRLDGPVTGNGKIINELEGIFEGAGWNVIKVMWGSRWDELLRKDTSGKLIQLMNETVDGDYQTFKSKDGAYVREHFFGKYPETAALVADWTDEQIWALNRGG<KEY>", "processsed_formula": "C4436H6965N1217O1216S27", "processsed_molecular_weight": 97668.439, "processsed_charge": 98, "modifications": "K --> MOD:00064 (716)", "crosslinks": np.NaN, "modified_sequence_abbreviated_bpforms": "SERFPNDVDPIETRDWLQAIESVIREEGVERAQYLIDQLLAEARKGGVNVAAGTGISNYINTIPVEEQPEYPGNLELERRIRSAIRWNAIMTVLRASKKDLELGGHMASFQSSATIYDVCFNHFFRARNEQDGGDLVYFQGHISPGVYARAFLEGRLTQEQLDNFRQEVHGNGLSSYPHPKLMPEFWQFPTVSMGLGPIGAIYQAKFLKYLEHRGLKDTSKQTVYAFLGDGEMDEPESKGAIT<KEY>AA<KEY>", "modified_sequence_bpforms": "SER<KEY>GLKDTSKQTVYAFLGDGEMDEPESKGAITIATREKLDNLVFVINCNLQRLDGPVTGNGKIINELEGIFEG<KEY>AA<KEY>", "concrete": True, "modified_formula": "C4438H6966N1217O1217S27", "modified_molecular_weight": 97709.46800000001, "modified_charge": 97, "modifications_formula": "C2HO", "modifications_molecular_weight": 41.028999999994994, "modifications_charge": -1, "pro_issues": np.NaN, "monomeric_form_issues": np.NaN, "reference": { "doi": "10.1093/nar/gkw1075" } } ] } self.assertEqual(self.src.build_uniprot_observation({}), {}) self.assertEqual(self.src.build_uniprot_observation(obj)["entity"]["type"], "protein") @unittest.skip("passed") def test_build_rna_observation(self): obj = { "uniprot_id": "Q8TUR2", "halflives": [ { "halflife": 4041.87006, "std": 523.16592, "std_over_avg": 0.1294366004, "unit": "s", "reference": [ { "doi": "10.1186/s12864-016-3219-8" } ], "growth_medium": "TMA", "ordered_locus_name": "MA0001", "ar_cog": "arCOG00468", "cog_class": "L", "cog": "COG1474", "species": 
"Methanosarcina acetivorans", "ncbi_taxonomy_id": 188937 }, { "systematic_name": "YJL194W", "halflife": 1200, "unit": "s", "species": "Saccharomyces cerevisiae W303", "ncbi_taxonomy_id": 580240, "r_squared": 0.98, "reference": [ { "doi": "10.1091/mbc.e11-01-0028" } ] }, { "accession_id": "NM_031449 ", "probeset_id": 3000010, "values": [ { "gm07029": 6.200316296854718, "biological_replicates": "a1", "note": "independent cell cultures for the same cell line", "unit": "hr" }, { "gm07029": 5.817285876322001, "biological_replicates": "a3", "note": "independent cell cultures for the same cell line", "unit": "hr" }, { "gm10835": 4.167696688892588, "biological_replicates": "a1", "note": "independent cell cultures for the same cell line", "unit": "hr" }, { "gm10835": 4.454436766714646, "biological_replicates": "a2", "note": "independent cell cultures for the same cell line", "unit": "hr" }, { "gm10835": 4.0912138205438024, "biological_replicates": "a3", "note": "independent cell cultures for the same cell line", "unit": "hr" }, { "gm12813": 7.853596564318888, "biological_replicates": "a1", "note": "independent cell cultures for the same cell line", "unit": "hr" }, { "gm12813": 8.231318451169917, "technical_replicates": "a1", "note": "separate RNA aliquots from the same cell culture", "unit": "hr" }, { "gm12813": 7.958703606479381, "biological_replicates": "a2", "note": "independent cell cultures for the same cell line", "unit": "hr" }, { "gm12813": 7.798393420876806, "technical_replicates": "a2", "note": "separate RNA aliquots from the same cell culture", "unit": "hr" }, { "gm12813": 7.167623222693315, "technical_replicates": "a3", "note": "separate RNA aliquots from the same cell culture", "unit": "hr" }, { "gm07019": 5.640622176, "unit": "hr" }, { "gm12812": 6.162088116, "unit": "hr" }, { "gm12814": 6.042021467, "unit": "hr" }, { "gm12815": 6.758158592, "unit": "hr" } ], "anova_3": 3.923e-7, "anova_7": 0.00000515675001947275, "false_discovery_rate_3": 0.0004363744, 
"false_discovery_rate_7": 0.00154363733333333, "species": "Homo sapiens", "ncbi_taxonomy_id": 9606, "reference": [ { "doi": "10.1038/srep01318" } ], "gene_symbol": "ZMIZ2 " }, { "chromosome": "chr10", "systematic_name": "YJL194W", "gene_name": "CDC6", "type": "coding_dna_sequences", "halflife": 1593.6956414259646, "unit": "s", "species": "Saccharomyces cerevisiae S288C", "ncbi_taxonomy_id": 559292, "reference": [ { "doi": "10.1016/j.cell.2013.12.026" } ] }, { "halflife": 353.6181058470822, "r_sqaured": 0.9974211229873777, "unit": "s", "reference": [ { "doi": "10.1093/nar/gks1019", "pubmed_id": "23125364" } ], "growth_medium": "Middlebrook 7H9 with the ADC supplement (Difco) and 0.05% Tween80, at 37 degree celcius.", "ordered_locus_name": "MSMEG_1867", "species": "Mycolicibacterium smegmatis MC2 155", "ncbi_taxonomy_id": 246196 }, { "halflife": 489.00036341439363, "variation_coefficient": 18.2337631285916, "species": "Escherichia coli str. K-12 substr. MG1655", "ncbi_taxonomy_id": 511145, "unit": "s", "reference": [ { "doi": "10.1093/nar/gkt1150" } ], "growth_medium": "M9 minimal medium supplemented with glucose", "ordered_locus_name": "b0060", "doubling_time": { "value": 6.9, "unit": "h" } }, { "transcript_size": 1938, "cds_size": 918, "intron_size": 6906, "genomic_size": 8844, "intron_count": 8, "halflife": 21797.96271, "r_sqaured": 0.988426426, "standard_error": 0.019283288, "unit": "s", "reference": [ { "doi": "10.1101/gr.131037.111", "pubmed_id": "22406755" } ], "accession_id": [ "AK088066", "AK133695", "BC003426", "NM_145371" ], "ncbi_taxonomy_id": 10090, "species": "Mus musculus" }, { "halflife": 113.13485976, "expression_reads_per_kb_per_mb": 19.37889675, "quantification_method": "Illumina GA-II", "transcriptional_start_sites": 160034, "transcriptional_end_sites": 162771, "unit": "s", "operon": [ "TU_160034-162771_F" ], "reference": [ { "doi": "10.1186/gb-2012-13-4-r30", "pubmed_id": "22537947" } ], "growth_medium": "Luria-Bertani (LB) broth (500 ml) at 30 
degree
current nickname for long_name in long_names: short_name = name_dict[long_name] # If the short_name is already in nicknames_set, that means it is a non-unique nickname # and we will record that accordingly if short_name in nicknames_set: non_unique_nicknames.add(short_name) # we have not yet seen this nickname else: nicknames_set.add(short_name) # Figure out ALL long names associated with the non-unique short names, since these are the # long names we will need to make new nicknames for. trouble_long_names = set() # iterate through all long names for long_name in long_names: # find nickname short_name = name_dict[long_name] # check if this nickname is in set of nicknames we know are non-unique if short_name in non_unique_nicknames: # Record this long name that yields a non-unique short name trouble_long_names.add(long_name) # Operate on all names that are associated with the non-unique short nicknames for long_name in trouble_long_names: # Set the nickname in name_dict to a new nickname # The new nickname is the long name, split by underscores (or spaces replaced with # underscores), where a number of chunks--corresponding to the iteration value--are joined # together to create a new name, separated by underscores. For example, if the long name # was my_long_name_is_this, and iteration was 3, the string would be my_long_time. name_dict[long_name] = '_'.join(long_name.replace(" ","").split('_')[0:iteration]) shorten_feature_names_helper(name_dict, long_names, iteration) return name_dict, long_names, iteration ## The following three functions are used to create a subset dataframe, usually for the purpose of ## running code faster, where a feature is selected (usually Sample_ID), and the contribution of the ## unique values to that df are evaluated. A new subset df, with user-defined length, either has ## the same proprotions of unique values in the specified feature, or even proprotions, as defined ## by the user. 
""" This function takes in a pandas dataframe (df), a string that is a header name in the df (col), and an integer (count) of the desired length of the returned dataframe. The function will return a dataframe of value counts of the unqiue values in df[col] as index, the 'current_row_count' column of the number of rows corresponding to each uniqe df[col] value, 'prop' corresponding to the proportion of each unique col's values rows in the oritinal dataframe, and 'desired_row_count', which is the number of that unique col's value's rows in a new dataframe, where the proprotions are the same as in the original df. """ def maintain_value_counts(df, col, count): # Values of each feature (counts) values = df[col].value_counts().sort_index() # Proportion of each unique col value in df props = values/df.shape[0] # Proportion of contributino of each unique col value, multiplied by the total number of lines # we want in a future df desired_rows = (props*count).round().astype(int) # Store and return data counts_df = pd.DataFrame( {'current_row_count':values,'prop':props,'desired_row_count':desired_rows}) return counts_df """ This function takes in a pandas dataframe (df), a string that is a header name in the df (col), and an integer (count) of the desired length of the returned dataframe. The function will return a dataframe of value counts of the unqiue values in df[col] as index, the 'current_row_count' column of the number of rows corresponding to each uniqe df[col] value, 'prop' corresponding to the proportion of each unique col's values rows in the oritinal dataframe, and 'desired_row_count', which is the number of that unique col's value's rows in a new dataframe, where the proprotions are equal to each other. 
""" def create_equal_value_counts(df, col, count): # Values of each feature (counts) values = df[col].value_counts().sort_index() # Proportion of each unique col value in df props = values/df.shape[0] # We want ech unique col value wants to contribute the same number of rows in future df desired_rows = round((count / len(df[col].unique()))) # Store and treturn data counts_df = pd.DataFrame({'current_row_count':values,'prop':props}) # We do not want more rows for a given unique col value than exist counts_df['desired_row_count'] = counts_df.apply( lambda row: min(desired_rows, row['current_row_count']), axis = 1) return counts_df """ This function takes in: a pandas dataframe (df); a string column of interest (col) in that df; an integer corresponding to the length of the returned df (count); and a string specifying ratio type (ratio), which must be either 'equal' or 'original'. The funcition returns a dataframe where rows of the input df were randomly sampled, without repalcement, to create a df of length counrt, where the proportion of each unique col value is either equal or the same as it was originally. """ def create_subset(df, col, count, ratio): # Check for acceptable parameter if ratio not in ['equal','original']: print("'ratio' must be either 'equal' or 'original'") print('Exiting...') return None if count > df.shape[0]: print(str(count) + " greater than dataframe length. 
Usuing df length (" + str(df.shape[0]) \ + ") instead.") count = min(count, df.shape[0]) # Generate helper df to gete expected row counts for each unique df[col] value if ratio == 'original': print('here') counts_df = maintain_value_counts(df, col, count) else: counts_df = create_equal_value_counts(df, col, count) # Create a dataframe to hold subset data subset_df = pd.DataFrame(columns = df.columns) # Iterate through all unique df[col] values, sample correct number of rows, and add them to # output df for c in df[col].unique(): a = counts_df.loc[counts_df.index == c,'current_row_count'].values[0] size = int(counts_df.loc[counts_df.index == c,'desired_row_count'].values[0]) random_rows = np.random.choice(a = a, size = size, replace = False) df_sample = df.loc[df[col] == c,:] subset_df = subset_df.append(df_sample.iloc[random_rows,:]) return subset_df """ This function creates a seaborn heatmap and saves the figure as a png. If given annotatin data, will plot row/column colors and create corresponding legends. It requies: a string title (title) for the plot; a pandas dataframe of data to plot (data); a string method for plotting (method), e.g., 'ward'; a string metric for plotting the distance between features, e.g., 'correlation', or 'euclidean'; a string specifiy the colormape to be used (cmap); a dictionary of optional colorbar keyword arguments (cbar_kws), e.g., for use in labeling colorbar scale legend; a list of labels for the x-axis features (xticklabels); a string for the directory in which the file whould be saved (save_loc); boolean values for row and column clustering (row_cluster, col_cluster); and an optional dictionary containing annotations for row and column colors. If not provided in annotations, no data will be plotted as row/column colors, and no accompanying legends will be produced. 
""" def heatmap_function( title, data, method, metric, cmap, cbar_kws, xticklabels, save_loc, row_cluster, col_cluster, annotations = {'rows':[],'cols':[]}): # Set seaborn font scale sb.set(font_scale= 2.0) # Extract row and column mappings row_mappings = [] col_mappings = [] for ann in annotations['rows']: row_mappings.append(ann['mapping']) for ann in annotations['cols']: col_mappings.append(ann['mapping']) # If empty lists, convert to None so seaborn accepts # as the row_colors or col_colors objects if len(row_mappings) == 0: row_mappings = None if len(col_mappings) == 0: col_mappings = None # Create clustermap g = sb.clustermap( data = data, robust = True, method = method, metric = metric, cmap = cmap, row_cluster = row_cluster, col_cluster = col_cluster, figsize = (40,30), row_colors=row_mappings, col_colors=col_mappings, yticklabels = False, cbar_kws = cbar_kws, xticklabels = xticklabels) # Add title g.fig.suptitle(title, fontsize = 60.0) #And now for the legends: # iterate through 'rows', 'cols' for ann_type in sorted(annotations.keys()): # iterate through each individual annotation feature for ann in annotations[ann_type]: # Get the color dictionary color_dict = ann['dict'] # Iterate through all keys in the color dictionary, create/capture handles for legend handles = [] for item in sorted(color_dict.keys()): h = g.ax_col_dendrogram.bar(0,0, color = color_dict[item], label = item, linewidth = 0) handles.append(h) # Add legend to plot legend = plt.legend(handles = handles, loc = ann['location'], title = ann['label'], bbox_to_anchor=ann['bbox_to_anchor'], bbox_transform=plt.gcf().transFigure) ax = plt.gca().add_artist(legend) # Save image filename = os.path.join(save_loc, title.lower().replace(" ","_") + ".png") g.savefig(filename) return None # sources - # https://stackoverflow.com/questions/27988846/how-to-express-classes-on-the-axis-of-a-heatmap-in-seaborn # https://matplotlib.org/3.1.1/tutorials/intermediate/legend_guide.html """ This function takes in a 
string filename, and an integer (lines_read). It uses subprocess to assess the number of lines in filename. Worth mentioning since this is what failed on some machines (1 PC failure; 1 PC + 1 MacBook success) and not others in development. If the identified number of lines in the file differs from lines_read, then a warning is printed. Nothing is returned. """ def verify_line_no(filename, lines_read): #
# <reponame>LoganAMorrison/Hazma <gh_stars>1-10
# NOTE(review): the line above was scraper/dataset residue fused onto the
# import line; it is kept only as a comment so the module can be imported.
from typing import Generator, Optional, Union

import numpy as np
import numpy.typing as npt
from scipy.special import gamma  # type:ignore

# Pion mass in GeV
MPI_GEV = 0.13957018
# Neutral Kaon mass in GeV
MK0_GEV = 0.497611
# Charged Kaon mass in GeV
MKP_GEV = 0.493677
# Eta mass in GeV (original comment wrongly said "Charged Kaon")
META_GEV = 0.547862


def beta2(
    s: Union[float, npt.NDArray[np.float64]],
    m1: float,
    m2: float,
) -> Union[float, npt.NDArray[np.float64]]:
    """
    Return the squared final-state velocity, i.e. the final state momentum
    squared times 4 / s, clipped below at zero.

    (The original docstring said "momentum times 4 / s"; this function
    returns the *square* — see `beta` for the square root.)

    Parameters
    ----------
    s: Union[float, npt.NDArray]
        Center-of-mass energy squared.
    m1: float
        Mass of the first final state particle.
    m2: float
        Mass of the second final state particle.

    Returns
    -------
    beta2: Union[float, npt.NDArray]
        Final state momentum squared times 4 / s (0.0 below threshold).
    """
    # Below threshold (s < (m1 + m2)^2) the product is negative; clip to
    # zero so `beta` never takes the square root of a negative number.
    return np.clip(
        (1.0 - (m1 + m2) ** 2 / s) * (1.0 - (m1 - m2) ** 2 / s), 0.0, None
    )  # type:ignore


def beta(
    s: Union[float, npt.NDArray[np.float64]],
    m1: float,
    m2: float,
):
    """
    Return the final-state velocity: the final state momentum times
    2 / sqrt(s), i.e. sqrt(beta2(s, m1, m2)).

    Parameters
    ----------
    s: Union[float, npt.NDArray]
        Center-of-mass energy squared.
    m1: float
        Mass of the first final state particle.
    m2: float
        Mass of the second final state particle.

    Returns
    -------
    beta: Union[float, npt.NDArray]
        Final state momentum times 2 / sqrt(s).
    """
    return np.sqrt(beta2(s, m1, m2))


def dhhatds(
    mres: Union[float, npt.NDArray[np.float64]],
    gamma: Union[float, npt.NDArray[np.float64]],
    m1: float,
    m2: float,
) -> Union[float, npt.NDArray[np.float64]]:
    """
    Compute the derivative of the Hhat(s) function for the Gounaris-Sakurai
    Breit-Wigner function, evaluated at the resonance mass.

    See ArXiv:1002.0279 Eqn.(4) for details.

    Parameters
    ----------
    mres: Union[float, npt.NDArray]
        Mass of the resonance.
    gamma: Union[float, npt.NDArray]
        Width of the resonance.
    m1: float
        Mass of the first final state particle.
    m2: float
        Mass of the second final state particle.

    Returns
    -------
    dhhat: Union[float, npt.NDArray]
        The derivative of Hhat(s) evaluated at s = mres^2.
    """
    # Velocity (squared) at the resonance and the reduced mass-sum ratio.
    v2 = beta2(mres ** 2, m1, m2)
    v = np.sqrt(v2)
    r = (m1 ** 2 + m2 ** 2) / mres ** 2
    return (
        gamma
        / np.pi
        / mres
        / v2
        * (
            (3.0 - 2.0 * v2 - 3.0 * r) * np.log((1.0 + v) / (1.0 - v))
            + 2.0 * v * (1.0 - r / (1.0 - v2))
        )
    )


def hhat(
    s: Union[float, npt.NDArray[np.float64]],
    mres: Union[float, npt.NDArray[np.float64]],
    gamma: Union[float, npt.NDArray[np.float64]],
    m1: float,
    m2: float,
    reshape=False,
) -> Union[float, npt.NDArray[np.float64]]:
    """
    Compute the Hhat(s) function for the Gounaris-Sakurai Breit-Wigner
    function. See ArXiv:1002.0279 Eqn.(4) for details.

    Parameters
    ----------
    s: Union[float, npt.NDArray]
        Center-of-mass energy squared.
    mres: Union[float, npt.NDArray]
        Mass of the resonance.
    gamma: Union[float, npt.NDArray]
        Width of the resonance.
    m1: float
        Mass of the first final state particle.
    m2: float
        Mass of the second final state particle.
    reshape: Optional[bool]
        If True, broadcast an array of `s` values against (possibly array)
        resonance parameters, producing one row per `s`.

    Returns
    -------
    hhat: Union[float, npt.NDArray]
        The value of the Hhat(s) function.
    """
    vr = beta(mres ** 2, m1, m2)
    v = beta(s, m1, m2)
    if hasattr(s, "__len__") and reshape:
        # Broadcast s along a new leading axis so many s values can be
        # evaluated against array-valued resonance parameters at once.
        ss = np.array(s)
        return (
            gamma
            / mres
            / np.pi
            * ss[:, np.newaxis]
            * (v[:, np.newaxis] / vr) ** 3
            * np.log((1.0 + v[:, np.newaxis]) / (1.0 - v[:, np.newaxis]))
        )
    return gamma / mres / np.pi * s * (v / vr) ** 3 * np.log((1.0 + v) / (1.0 - v))


def h(
    s: Union[float, npt.NDArray[np.float64]],
    mres: Union[float, npt.NDArray[np.float64]],
    gamma: Union[float, npt.NDArray[np.float64]],
    m1: float,
    m2: float,
    dh: Union[float, npt.NDArray[np.float64]],
    hres: Union[float, npt.NDArray[np.float64]],
    reshape=False,
) -> Union[float, npt.NDArray[np.float64]]:
    """
    Compute the H(s) function for the Gounaris-Sakurai Breit-Wigner
    function. See ArXiv:1002.0279 Eqn.(3) for details.

    Parameters
    ----------
    s: Union[float, npt.NDArray]
        Center-of-mass energy squared.
    mres: Union[float, npt.NDArray]
        Mass of the resonance.
    gamma: Union[float, npt.NDArray]
        Width of the resonance.
    m1: float
        Mass of the first final state particle.
    m2: float
        Mass of the second final state particle.
    dh: Union[float, npt.NDArray]
        Derivative of the H-hat function evaluated at the resonance mass.
    hres: Union[float, npt.NDArray]
        Value of the Hhat function at s = mres^2.
    reshape: Optional[bool]
        If True, broadcast an array of `s` values against (possibly array)
        resonance parameters, producing one row per `s`.

    Returns
    -------
    h: Union[float, npt.NDArray]
        The value of the H(s) function.

    Notes
    -----
    The non-reshape branch compares ``s != 0.0`` and therefore expects a
    scalar `s` when ``reshape`` is False.
    """
    if hasattr(s, "__len__") and reshape:
        ss = np.array(s)
        return (
            hhat(ss, mres, gamma, m1, m2, reshape=True)
            - hres
            - (ss[:, np.newaxis] - mres ** 2) * dh
        )
    if s != 0.0:
        return hhat(s, mres, gamma, m1, m2) - hres - (s - mres ** 2) * dh
    else:
        # s = 0 limit: hhat(0) is replaced by its analytic limiting value.
        return (
            -2.0 * (m1 + m2) ** 2 / np.pi * gamma / mres / beta(mres ** 2, m1, m2) ** 3
            - hres
            + mres ** 2 * dh
        )


def gamma_p(
    s: Union[float, npt.NDArray[np.float64]],
    mres: Union[float, npt.NDArray[np.float64]],
    gamma: Union[float, npt.NDArray[np.float64]],
    m1: float,
    m2: float,
    reshape: Optional[bool] = False,
) -> Union[float, npt.NDArray[np.float64]]:
    """
    Compute the s-dependent width of the resonance.
    See ArXiv:1002.0279 Eqn.(6) for details.

    Parameters
    ----------
    s: Union[float, npt.NDArray]
        Center-of-mass energy squared.
    mres: Union[float, npt.NDArray]
        Mass of the resonance.
    gamma: Union[float, npt.NDArray]
        Width of the resonance.
    m1: float
        Mass of the first final state particle.
    m2: float
        Mass of the second final state particle.
    reshape: Optional[bool]
        If True, broadcast an array of `s` values against (possibly array)
        resonance parameters, producing one row per `s`.

    Returns
    -------
    gamma: Union[float, npt.NDArray]
        The s-dependent width.
    """
    v2 = beta2(s, m1, m2)
    vr2 = beta2(mres ** 2, m1, m2)
    if hasattr(s, "__len__") and reshape:
        rp = np.sqrt(
            np.clip(
                v2[:, np.newaxis] / vr2,  # type:ignore
                0.0,
                None,
            )
        )
        return np.sqrt(s)[:, np.newaxis] / mres * rp ** 3 * gamma
    # Guard against a zero resonance velocity (resonance at threshold);
    # np.where keeps the expression array-friendly.
    rp = np.where(vr2 == 0.0, vr2, np.sqrt(np.clip(v2 / vr2, 0.0, None)))
    return np.sqrt(s) / mres * rp ** 3 * gamma


def breit_wigner_gs(
    s: Union[float, npt.NDArray[np.float64]],
    mres: Union[float, npt.NDArray[np.float64]],
    gamma: Union[float, npt.NDArray[np.float64]],
    m1: float,
    m2: float,
    h0: Union[float, npt.NDArray[np.float64]],
    dh: Union[float, npt.NDArray[np.float64]],
    hres: Union[float, npt.NDArray[np.float64]],
    reshape: Optional[bool] = False,
) -> Union[complex, npt.NDArray[np.complex128]]:
    """
    Compute the Gounaris-Sakurai Breit-Wigner function with pion-loop
    corrections included. See ArXiv:1002.0279 Eqn.(2) for details.

    Parameters
    ----------
    s: Union[float, npt.NDArray]
        Center-of-mass energy squared.
    mres: Union[float, npt.NDArray]
        Mass of the resonance.
    gamma: Union[float, npt.NDArray]
        Width of the resonance.
    m1: float
        Mass of the first final state particle.
    m2: float
        Mass of the second final state particle.
    h0: Union[float, npt.NDArray]
        Value of the H(s) function at s = 0.
    dh: Union[float, npt.NDArray]
        Derivative of the H-hat function evaluated at the resonance mass.
    hres: Union[float, npt.NDArray]
        Value of the Hhat function at s = mres^2.
    reshape: Optional[bool]
        If True, broadcast an array of `s` values against (possibly array)
        resonance parameters, producing one row per `s`.

    Returns
    -------
    bw: Union[complex, npt.NDArray]
        The Breit-Wigner function.
    """
    mr2 = mres ** 2
    if hasattr(s, "__len__") and reshape:
        ss = np.array(s)
        return (mr2 + h0) / (
            mr2
            - ss[:, np.newaxis]
            + h(ss, mres, gamma, m1, m2, dh, hres, reshape=True)
            - 1j
            * np.sqrt(ss)[:, np.newaxis]
            * gamma_p(ss, mres, gamma, m1, m2, reshape=True)
        )
    return (mr2 + h0) / (
        mr2
        - s
        + h(s, mres, gamma, m1, m2, dh, hres)
        - 1j * np.sqrt(s) * gamma_p(s, mres, gamma, m1, m2)
    )


def breit_wigner_fw(
    s: Union[float, npt.NDArray[np.float64]],
    mres: Union[float, complex, npt.NDArray[np.float64]],
    gamma: Union[float, complex, npt.NDArray[np.float64]],
    reshape: Optional[bool] = False,
) -> Union[complex, npt.NDArray[np.complex128]]:
    """
    Compute the standard Breit-Wigner with a constant width.
    See ArXiv:1002.0279 Eqn.(8) for details.

    Parameters
    ----------
    s: Union[float, npt.NDArray]
        Center-of-mass energy squared.
    mres: Union[float, npt.NDArray]
        Mass of the resonance.
    gamma: Union[float, npt.NDArray]
        Width of the resonance.
    reshape: Optional[bool]
        If True, broadcast an array of `s` values against (possibly array)
        resonance parameters, producing one row per `s`.

    Returns
    -------
    bw: Union[complex, npt.NDArray]
        The Breit-Wigner function.
    """
    mr2 = mres ** 2
    if hasattr(s, "__len__") and reshape:
        ss = np.array(s)
        return mr2 / (mr2 - ss[:, np.newaxis] - 1j * mres * gamma)
    return mr2 / (mr2 - s - 1j * mres * gamma)
AttributeError as e: client.debug_print_exception(e) await message.reply( client.l( 'must_be_one_of', client.l('color'), [i.name for i in fortnitepy.KairosBackgroundColorPreset] ) ) return avatar = fortnitepy.Avatar( asset=message.args[1], background_colors=background_colors ) client.set_avatar(avatar) await message.reply( client.l( 'set_to', client.l('avatar'), f'{message.args[1]}, {background_colors}' ) ) @command( name='status', usage='{name} [{client.l("message")}]' ) async def status(command: Command, client: 'Client', message: MyMessage) -> None: await client.set_presence(' '.join(message.args[1:])) await message.reply( client.l( 'set_to', client.l('status'), ' '.join(message.args[1:]) ) ) @command( name='banner', usage='{name} [ID] [{client.l("color")}]' ) async def banner(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 3: await client.show_help(command, message) return await client.party.me.edit_and_keep(partial( client.party.me.set_banner, icon=message.args[1], color=message.args[2], season_level=client.party.me.banner[2] )) await message.reply( client.l( 'set_to', client.l('banner'), f'{message.args[1]}, {message.args[2]}' ) ) @command( name='level', usage='{name} [{client.l("number")}]' ) async def level(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 2: await client.show_help(command, message) return icon, color = client.party.me.banner[:2] try: level = int(message.args[1]) except ValueError as e: client.debug_print_exception(e) await message.reply( client.l('please_enter_valid_value') ) return await client.party.me.edit_and_keep(partial( client.party.me.set_banner, icon=icon, color=color, season_level=level )) await message.reply( client.l( 'set_to', client.l('level'), level ) ) @command( name='battlepass', usage='{name} [{client.l("number")}]' ) async def battlepass(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 2: await 
client.show_help(command, message) return try: tier = int(message.args[1]) except ValueError as e: client.debug_print_exception(e) await message.reply( client.l('please_enter_valid_value') ) return await client.party.me.edit_and_keep(partial( client.party.me.set_battlepass_info, has_purchased=True, level=tier )) await message.reply( client.l( 'set_to', client.l('tier'), tier ) ) @command( name='privacy', usage='{name} [{client.l("privacy")}]' ) async def privacy(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 2: await client.show_help(command, message) return if not client.party.me.leader: await message.reply( client.l('not_a_party_leader') ) privacies = [ (p.name.lower(), p) for p in PartyPrivacy ] for p, value in privacies: if message.args[1] in client.commands[p]: try: await client.party.set_privacy(value) except fortnitepy.Forbidden as e: client.debug_print_exception(e) await message.reply( client.l('not_a_party_leader') ) return await message.reply( client.l( 'set_to', client.l('privacy'), client.bot.l(f'privacy_{p}') ) ) break else: await message.reply( client.l( 'must_be_one_of', client.l('privacy'), [client.commands[p][0] for p in privacies] ) ) @command( name='voice_chat', usage=( '{name} [{client.l("bool", **self.variables_without_self)}]\n' '{client.l("current_setting", client.l("enabled") ' 'if client.party.voice_chat_enabled else ' 'client.l("disabled"))}' ) ) async def voice_chat(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 2: await client.show_help(command, message) return if not client.party.me.leader: await message.reply( client.l('not_a_party_leader') ) return if message.args[1] in client.commands['true']: try: await client.party.enable_voice_chat() except fortnitepy.Forbidden as e: client.debug_print_exception(e) await message.reply( client.l('not_a_party_leader') ) return await message.reply( client.l( 'set_to', client.l('voice_chat'), client.l('enabled') ) ) elif 
message.args[1] in client.commands['false']: try: await client.party.disable_voice_chat() except fortnitepy.Forbidden as e: client.debug_print_exception(e) await message.reply( client.l('not_a_party_leader') ) return await message.reply( client.l( 'set_to', client.l('voice_chat'), client.l('disabled') ) ) else: await client.show_help(command, message) @command( name='promote', usage='{name} [{client.l("name_or_id")}]' ) async def promote(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 2: await client.show_help(command, message) return if not client.party.me.leader: await message.reply( client.l('not_a_party_leader') ) return users = client.find_users( ' '.join(message.args[1:]), mode=FindUserMode.NAME_ID, method=FindUserMatchMethod.CONTAINS, users=client.party.members, me=message.author ) async def promote(user): member = client.party.get_member(user.id) if member is None: await message.reply( client.l( 'not_in_party', client.name(user) ) ) return ret = await client.promote_member(member, message) if not isinstance(ret, Exception): await message.reply( client.l( 'promote', client.name(member) ) ) if client.config['search_max'] and len(users) > client.config['search_max']: await message.reply( client.l('too_many', client.l('user'), len(users)) ) return if len(users) == 0: await message.reply( client.l( 'not_found', client.l('user'), ' '.join(message.args[1:]) ) ) elif len(users) == 1: await promote(users[0]) else: client.select[message.author.id] = { 'exec': 'await promote(user)', 'globals': {**globals(), **locals()}, 'variables': [ {'user': user} for user in users ] } await message.reply( ('\n'.join([f'{num}: {client.name(user)}' for num, user in enumerate(users, 1)]) + '\n' + client.l('enter_number_to_select', client.l('user'))) ) @command( name='kick', usage='{name} [{client.l("name_or_id")}]' ) async def kick(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 2: await 
client.show_help(command, message) return if not client.party.me.leader: await message.reply( client.l('not_a_party_leader') ) return users = client.find_users( ' '.join(message.args[1:]), mode=FindUserMode.NAME_ID, method=FindUserMatchMethod.CONTAINS, users=client.party.members, me=message.author ) async def kick(user): member = client.party.get_member(user.id) if member is None: await message.reply( client.l( 'not_in_party', client.name(user) ) ) return ret = await client.kick_member(member, message) if not isinstance(ret, Exception): await message.reply( client.l( 'kick', client.name(member) ) ) if client.config['search_max'] and len(users) > client.config['search_max']: await message.reply( client.l('too_many', client.l('user'), len(users)) ) return if len(users) == 0: await message.reply( client.l( 'not_found', client.l('user'), ' '.join(message.args[1:]) ) ) elif len(users) == 1: await kick(users[0]) else: client.select[message.author.id] = { 'exec': 'await kick(user)', 'globals': {**globals(), **locals()}, 'variables': [ {'user': user} for user in users ] } await message.reply( ('\n'.join([f'{num}: {client.name(user)}' for num, user in enumerate(users, 1)]) + '\n' + client.l('enter_number_to_select', client.l('user'))) ) @command( name='chatban', usage='{name} [{client.l("name_or_id")}] : ({client.l("reason")})' ) async def chatban(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 2: await client.show_help(command, message) return if not client.party.me.leader: await message.reply( client.l('not_a_party_leader') ) return text = ' '.join(message.args[1:]).split(' : ') if len(text) == 1: user_name = text[0] reason = None else: user_name, *reason = text reason = ' '.join(reason) users = client.find_users( user_name, mode=FindUserMode.NAME_ID, method=FindUserMatchMethod.CONTAINS, users=client.party.members, me=message.author ) async def chatban(user): member = client.party.get_member(user.id) if member is None: await 
message.reply( client.l( 'not_in_party', client.name(user) ) ) return ret = await client.chatban_member(member, reason, message) if not isinstance(ret, Exception): if reason is None: await message.reply( client.l( 'chatban', client.name(member) ) ) else: await message.reply( client.l( 'chatban_reason', client.name(member), reason ) ) if client.config['search_max'] and len(users) > client.config['search_max']: await message.reply( client.l('too_many', client.l('user'), len(users)) ) return if len(users) == 0: await message.reply( client.l( 'not_found', client.l('user'), user_name ) ) elif len(users) == 1: await chatban(users[0]) else: client.select[message.author.id] = { 'exec': 'await chatban(user)', 'globals': {**globals(), **locals()}, 'variables': [ {'user': user} for user in users ] } await message.reply( ('\n'.join([f'{num}: {client.name(user)}' for num, user in enumerate(users, 1)]) + '\n' + client.l('enter_number_to_select', client.l('user'))) ) @command( name='hide', usage='{name} ({client.l("name_or_id")})' ) async def hide(command: Command, client: 'Client', message: MyMessage) -> None: if len(message.args) < 2: count = 0 for member in client.party.members: if client.is_hide_for(client.get_user_type(member.id)): if client.party.add_hide_user(member.id): count += 1 try: await client.party.refresh_squad_assignments() except Exception as e: if isinstance(e, fortnitepy.HTTPException): client.debug_print_exception(e) else: client.print_exception(e) text = client.l('error_while_hiding_members') client.send( text, add_p=client.time, file=sys.stderr ) if message is not None: await message.reply(text) return await message.reply( client.l( 'hide_members', count ) ) else: if not client.party.me.leader: await message.reply( client.l('not_a_party_leader') ) return users = client.find_users( ' '.join(message.args[1:]), mode=FindUserMode.NAME_ID, method=FindUserMatchMethod.CONTAINS, users=client.party.members, me=message.author ) async def hide(user): member = 
client.party.get_member(user.id) if member is None: await message.reply( client.l( 'not_in_party', client.name(user) ) ) return ret = await client.hide_member(member, message) if not isinstance(ret, Exception): await message.reply( client.l( 'hide', client.name(member) ) ) if client.config['search_max'] and len(users) > client.config['search_max']: await message.reply( client.l('too_many', client.l('user'), len(users)) ) return if len(users) == 0: await message.reply( client.l( 'not_found', client.l('user'), ' '.join(message.args[1:]) ) ) elif len(users) == 1: await hide(users[0]) else: client.select[message.author.id] = { 'exec': 'await hide(user)', 'globals': {**globals(), **locals()}, 'variables': [ {'user': user} for user in users ] } await message.reply( ('\n'.join([f'{num}: {client.name(user)}' for num, user in enumerate(users, 1)]) + '\n' + client.l('enter_number_to_select', client.l('user'))) ) @command( name='show', usage='{name} ({client.l("name_or_id")})' ) async def show(command: Command, client: 'Client', message: MyMessage) -> None: if not client.party.me.leader: await message.reply( client.l('not_a_party_leader') ) return if len(message.args) < 2: count = 0 for member in client.party.members: if client.party.remove_hide_user(member.id): count += 1 client.party.update_hide_users([]) try: await client.party.refresh_squad_assignments() except Exception as e: if isinstance(e, fortnitepy.HTTPException): client.debug_print_exception(e) else: client.print_exception(e) text = client.l('error_while_showing_members') client.send( text, add_p=client.time, file=sys.stderr ) if message is not None: await message.reply(text) return await message.reply( client.l( 'show_members', count ) ) else: users = client.find_users( ' '.join(message.args[1:]), mode=FindUserMode.NAME_ID, method=FindUserMatchMethod.CONTAINS, users=client.party.members, me=message.author ) async def show(user): member = client.party.get_member(user.id) if member is None: await message.reply( 
client.l( 'not_in_party', client.name(user) ) ) return ret = await client.show_member(member, message) if not isinstance(ret, Exception): await message.reply( client.l( 'show', client.name(member) ) ) if client.config['search_max'] and len(users) > client.config['search_max']: await message.reply( client.l('too_many', client.l('user'), len(users)) ) return if len(users) == 0: await message.reply( client.l( 'not_found', client.l('user'), ' '.join(message.args[1:]) ) ) elif len(users) == 1: await show(users[0]) else: client.select[message.author.id] = { 'exec': 'await show(user)', 'globals': {**globals(), **locals()}, 'variables': [ {'user': user} for user in users ] } await message.reply( ('\n'.join([f'{num}: {client.name(user)}' for num, user in enumerate(users, 1)]) + '\n' + client.l('enter_number_to_select', client.l('user'))) ) @command( name='ready', usage='{name}' ) async def ready(command: Command, client: 'Client', message: MyMessage) -> None: await client.party.me.set_ready(fortnitepy.ReadyState.READY) await message.reply( client.l( 'set_to', client.l('ready_state'), client.l('ready_state_ready') ) ) @command( name='unready', usage='{name}' ) async def unready(command: Command, client: 'Client', message: MyMessage) -> None: await client.party.me.set_ready(fortnitepy.ReadyState.NOT_READY) await message.reply( client.l( 'set_to', client.l('ready_state'),
or .. ''' # This is for linux paths only if dest_rel_path in ('', '/'): return current_abs_path # Strip / at start and end of dest dest_rel_path = dest_rel_path.rstrip('/').lstrip('/') if current_abs_path[-1] != '/': current_abs_path += '/' curr_paths = current_abs_path.rstrip('/').lstrip('/').split('/') if len(curr_paths) == 1 and curr_paths[0] == '': curr_paths = [] rel_paths = dest_rel_path.split('/') curr_path_index = len(curr_paths) for x in rel_paths: if x == '.': pass elif x == '..': if curr_path_index == 0: raise ValueError('Relative path tried to go above root !') else: curr_path_index -= 1 curr_paths.pop() elif x == '': raise ValueError("Relative path had // , can't parse") else: curr_paths.append(x) curr_path_index += 1 final_path = '' for index, x in enumerate(curr_paths): final_path += '/' + x if index == curr_path_index: break if final_path == '': final_path = '/' return final_path def GetFileMACTimes(self, file_path): ''' Returns dictionary {c_time, m_time, cr_time, a_time} where cr_time = created time and c_time = Last time inode/mft modified ''' if self.use_native_hfs_parser: return self.hfs_native.GetFileMACTimes(file_path) times = { 'c_time':None, 'm_time':None, 'cr_time':None, 'a_time':None } try: tsk_file = self.macos_FS.open(file_path) times['c_time'] = CommonFunctions.ReadUnixTime(tsk_file.info.meta.ctime) times['m_time'] = CommonFunctions.ReadUnixTime(tsk_file.info.meta.mtime) times['cr_time'] = CommonFunctions.ReadUnixTime(tsk_file.info.meta.crtime) times['a_time'] = CommonFunctions.ReadUnixTime(tsk_file.info.meta.atime) except Exception as ex: log.exception('Error trying to get MAC times') return times def GetExtendedAttribute(self, path, att_name): if self.use_native_hfs_parser: return self.hfs_native.GetExtendedAttribute(path, att_name) def GetExtendedAttributes(self, path): if self.use_native_hfs_parser: return self.hfs_native.GetExtendedAttributes(path) def ExportFolder(self, artifact_path, subfolder_name, overwrite): '''Export an 
artifact folder to the output\Export\subfolder_name folder. This will export the entire folder and subfolders recursively. This does not export Xattr. Return value is boolean (False if it encountered any errors). ''' export_path = os.path.join(self.output_params.export_path, subfolder_name, os.path.basename(artifact_path)) # create folder try: if not os.path.exists(export_path): os.makedirs(export_path) except (KeyError, ValueError, TypeError, OSError) as ex: log.error ("Exception while creating Export folder " + export_path + "\n Is output folder Writeable?" + "Is it full? Perhaps the drive is disconnected? Exception Details: " + str(ex)) return False # recursively export files/folders try: return self._ExportFolder(artifact_path, export_path, overwrite) except (KeyError, ValueError, TypeError, OSError): log.exception('Exception while exporting folder ' + artifact_path) return False def _ExportFolder(self, artifact_path, export_path, overwrite): '''Exports files/folders from artifact_path to export_path recursively''' artifact_path = artifact_path.rstrip('/') entries = self.ListItemsInFolder(artifact_path, EntryType.FILES_AND_FOLDERS, True) ret = True for entry in entries: new_path = os.path.join(export_path, self._GetSafeFilename(entry['name'])) if entry['type'] == EntryType.FOLDERS: try: if not os.path.exists(new_path): os.mkdir(new_path) except: log.exception("Exception while creating Export folder " + export_path) ret = False continue ret &= self._ExportFolder(artifact_path + '/' + entry['name'], new_path, overwrite) else: # FILE if entry['size'] > 0: ret &= self._ExtractFile(artifact_path + '/' + entry['name'], new_path, entry['dates']) else: log.info('Skipping export of {} as filesize=0'.format(artifact_path + '/' + entry['name'])) return ret def ExportFile(self, artifact_path, subfolder_name, file_prefix='', check_for_sqlite_files=True, overwrite=False): '''Export an artifact (file) to the output\Export\subfolder_name folder. 
        Ideally subfolder_name should be the name of the plugin.
        If 'overwrite' is set to True, it will not check for existing files. The default behaviour is to check
        and rename the newly exported file if there is a name collision.
        If this is an sqlite db, the -journal and -wal files will also be exported.
        The check for -journal and -wal can be skipped if check_for_sqlite_files=False
        It is much faster to skip the check if not needed.
        The Function returns False if it fails to export the file.
        '''
        # NOTE(review): the `def` line of this method lies outside this chunk — body reconstructed
        # from a whitespace-collapsed paste; confirm against the original file.
        export_path = os.path.join(self.output_params.export_path, subfolder_name)
        # create folder
        try:
            if not os.path.exists(export_path):
                os.makedirs(export_path)
        except Exception as ex:
            log.error ("Exception while creating Export folder " + export_path + "\n Is output folder Writeable?" +
                       "Is it full? Perhaps the drive is disconnected? Exception Details: " + str(ex))
            return False
        # extract each file to temp folder
        out_filename = file_prefix + os.path.basename(artifact_path)
        out_filename = self._GetSafeFilename(out_filename) #filter filenames based on platform (Eg: Windows does not like ?<>/\:*"! in filenames)
        if overwrite:
            file_path = os.path.join(export_path, out_filename)
        else:
            # pick a non-colliding name when not overwriting
            file_path = CommonFunctions.GetNextAvailableFileName(os.path.join(export_path, out_filename))
        if self._ExtractFile(artifact_path, file_path):
            if check_for_sqlite_files:
                # sqlite sidecar files share the db's basename with -journal / -wal suffixes
                jrn_file_path = file_path + "-journal"
                wal_file_path = file_path + "-wal"
                if self.IsValidFilePath(artifact_path + "-journal"):
                    self._ExtractFile(artifact_path + "-journal", jrn_file_path)
                if self.IsValidFilePath(artifact_path + "-wal"):
                    self._ExtractFile(artifact_path + "-wal", wal_file_path)
            return True
        return False

    def _ExtractFile(self, artifact_path, export_path, mac_times=None):
        '''Internal function, just export, no checks!

        Copies the artifact to export_path and logs the export (with the
        artifact's MAC times) into the sqlite export log. Returns True on
        success, False otherwise.
        '''
        if self.ExtractFile(artifact_path, export_path):
            if not mac_times:
                mac_times = self.GetFileMACTimes(artifact_path)
            export_path_rel = os.path.relpath(export_path, start=self.output_params.export_path)
            if self.is_windows:
                # normalize path separators in the log for cross-platform readability
                export_path_rel = export_path_rel.replace('\\', '/')
            self.output_params.export_log_sqlite.WriteRow([artifact_path, export_path_rel,
                mac_times['c_time'], mac_times['m_time'], mac_times['cr_time'], mac_times['a_time']])
            return True
        else:
            log.info("Failed to export '" + artifact_path + "' to '" + export_path + "'")
        return False

    def DeserializeNsKeyedPlist(self, plist_file):
        '''Returns a deserialized version of an NSKeyedArchive plist'''
        deserialised_plist = process_nsa_plist('', plist_file)
        return deserialised_plist

    def ReadPlist(self, path, deserialize=False):
        '''Safely open and read a plist; returns tuple (True/False, plist/None, "error_message")'''
        log.debug("Trying to open plist file : " + path)
        error = ''
        try:
            f = self.Open(path)
            if f != None:
                try:
                    log.debug("Trying to read plist file : " + path)
                    plist = biplist.readPlist(f)
                    if deserialize:
                        try:
                            f.seek(0)
                            plist = self.DeserializeNsKeyedPlist(f)
                            f.close()
                            return (True, plist, '')
                        except Exception as ex:
                            f.close()
                            error = 'Could not read deserialized plist: ' + path + " Error was : " + str(ex)
                    else:
                        f.close()
                        return (True, plist, '')
                except biplist.InvalidPlistException as ex:
                    try:
                        # Perhaps this is manually edited or incorrectly formatted by a non-Apple utility
                        # that has left whitespaces at the start of file before <?xml tag
                        # This is assuming XML format!
                        f.seek(0)
                        data = f.read().decode('utf8', 'ignore')
                        f.close()
                        data = data.lstrip(" \r\n\t").encode('utf8', 'backslashreplace')
                        if deserialize:
                            try:
                                temp_file = BytesIO(data)
                                plist = self.DeserializeNsKeyedPlist(temp_file)
                                temp_file.close()
                                return (True, plist, '')
                            except Exception as ex:
                                error = 'Could not read deserialized plist: ' + path + " Error was : " + str(ex)
                        else:
                            plist = biplist.readPlistFromString(data)
                            return (True, plist, '')
                    except (biplist.InvalidPlistException, biplist.NotBinaryPlistException) as ex:
                        error = 'Could not read plist: ' + path + " Error was : " + str(ex)
                except OSError as ex:
                    error = 'OSError while reading plist: ' + path + " Error was : " + str(ex)
            else:
                error = 'Failed to open file'
        except Exception as ex:
            error = 'Exception from ReadPlist, trying to open file. Exception=' + str(ex)
        return (False, None, error)

    def IsSymbolicLink(self, path):
        '''Check if path represents a symbolic link'''
        if self.use_native_hfs_parser:
            return self.hfs_native.IsSymbolicLink(path)
        # NOTE(review): non-native path always reports False — confirm intended.
        return False

    def ReadSymLinkTargetPath(self, path):
        '''Returns the target file/folder's path from the sym link path provided'''
        f = self.Open(path)
        if f:
            target_path = f.read()
            f.close()
            # link content is raw bytes; strip trailing NULs before decoding
            return target_path.rstrip(b'\0').decode('utf8', 'backslashreplace')
        return ''

    def IsValidFilePath(self, path):
        '''Check if a file path is valid, does not check for folders!'''
        if self.use_native_hfs_parser:
            return self.hfs_native.IsValidFilePath(path)
        try:
            valid_file = self.macos_FS.open(path)
            return True
        except Exception:
            pass
        return False

    def IsValidFolderPath(self, path):
        '''Check if a folder path is valid'''
        if self.use_native_hfs_parser:
            return self.hfs_native.IsValidFolderPath(path)
        try:
            valid_folder = self.macos_FS.open_dir(path)
            return True
        except Exception:
            pass
        return False

    def GetFileSize(self, path, error=None):
        '''For a given file path, gets logical file size, or None if error'''
        if self.use_native_hfs_parser:
            return self.hfs_native.GetFileSize(path)
        try:
            valid_file = self.macos_FS.open(path)
            return valid_file.info.meta.size
        except Exception as ex:
            log.debug (" Unknown exception from GetFileSize() " + str(ex) + " Perhaps file does not exist " + path)
        return error

    def ListItemsInFolder(self, path='/', types_to_fetch=EntryType.FILES_AND_FOLDERS, include_dates=False):
        '''
        Returns a list of files and/or folders in a list
        Format of list = [ { 'name':'got.txt', 'type':EntryType.FILES, 'size':10, 'dates': {} }, .. ]
        'path' should be linux style using forward-slash like '/var/db/xxyy/file.tdc'
        '''
        if self.use_native_hfs_parser:
            return self.hfs_native.ListItemsInFolder(path, types_to_fetch, include_dates)
        items = [] # List of dictionaries
        try:
            dir = self.macos_FS.open_dir(path)
            for entry in dir:
                name = self._GetName(entry)
                if name == "": continue
                elif name == "." or name == "..": continue
                elif not self._IsValidFileOrFolderEntry(entry): continue # this filters for allocated files and folders only
                entry_type = EntryType.FOLDERS if entry.info.name.type == pytsk3.TSK_FS_NAME_TYPE_DIR else EntryType.FILES
                if include_dates:
                    path_no_trailing_slash = path.rstrip('/')
                    item = { 'name':name, 'type':entry_type, 'size':self._GetSize(entry), 'dates': self.GetFileMACTimes(path_no_trailing_slash + '/' + name) }
                else:
                    item = { 'name':name, 'type':entry_type, 'size':self._GetSize(entry) }
                if types_to_fetch == EntryType.FILES_AND_FOLDERS:
                    items.append( item )
                elif types_to_fetch == EntryType.FILES and entry_type == EntryType.FILES:
                    items.append( item )
                elif types_to_fetch == EntryType.FOLDERS and entry_type == EntryType.FOLDERS:
                    items.append( item )
        # NOTE(review): this method is truncated in this chunk — the except handler
        # body continues beyond the visible text.
        except Exception
            = destFilename.replace("<site>", self.getSiteID())
            # NOTE(review): Python 2 code; the `def` line of this (writable-copies)
            # method lies outside this chunk. The `if 1 == 1:` / `else:` pair is a
            # deliberate stand-in for a commented-out try/except (see #try/#except).
            #try:
            if 1 == 1:
                TextFileUtil.makeWritableCopy(source, fileType, dest, False);
                self.output("Made makeWritableCopy: " + source + ' ' + \
                            fileType + ' ' + dest, self._outFile)
            #except:
            else:
                failed = failed + 1
                self.output("failed makeWritableCopy: " + source + ' ' + \
                            fileType + ' ' + dest, self._outFile)
        if failed == 0:
            if self._reportingMode not in ["Pretty"]:
                self.output("All Writable Copies successful", self._outFile)

    def _fileChanges(self, entry):
        # Apply the "fileChanges" edits described in `entry` to their target
        # text files, writing the results as user-level overrides.
        # Returns True if any changes were attempted, False if none configured.
        fileChanges = entry.get("fileChanges", None)
        if not fileChanges:
            return False
        from LockingFile import File
        failed = 0
        for fileName, fileType, changeType, strings, cleanUp in fileChanges:
            fileName = fileName.replace("<site>", self.getSiteID())
            # Get the file
            lf = TextFileUtil.getTextFile(fileName, fileType)
            if lf.getName().endswith(".py"):
                # drop any cached import so the edited module is re-imported fresh
                if sys.modules.has_key(fileName):
                    del sys.modules[fileName]
            try:
                with File(lf.getFile(), '', 'r') as pythonFile:
                    text = pythonFile.read()
            except:
                failed = 1
                print "FILE CHANGES failed reading from " + str(lf)
                raise
            #self.output("FILE CHANGES (initial) from " +str(lf) + "\n" + text, self._outFile) #DEBUG
            # Modify it
            if changeType == "add":
                text = text + strings
            elif changeType == "replace":
                # strings may be a tuple (orig, repl) or
                # a list of tuples for multiple changes to the same file
                if type(strings) == tuple:
                    strings = [strings]
                for orig, repl in strings:
                    strIndex = text.find(orig)
                    text = text.replace(orig, repl)
                    #self.output("FILE CHANGES (chg): " + orig + ' ' + repl, self._outFile) #DEBUG
                    #self.output("FILE CHANGES (mod): " + text, self._outFile) #DEBUG
                    if strIndex < 0:
                        self.output("File change failed for " + orig, self._outFile)
                        failed = 1
            # Write it
            destLf = TextFileUtil.getUserTextFile(lf)
            try:
                with File(destLf.getFile(), '', 'w') as pythonFile:
                    pythonFile.write(text)
                destLf.save()
            except:
                failed = 1
                print "FILE CHANGES failed writing to " + str(destLf)
                raise
            #self.output("FILE CHANGES (saved) to " + str(destLf) + "\n" + text, self._outFile) #DEBUG
        if len(fileChanges) and not failed:
            if self._reportingMode not in ["Pretty"]:
                self.output("All File Changes successful", self._outFile)
        return True

    def _determineMaxMinBeginEnd(self, entry):
        # Determine MaxT MinT MaxRH MinRH begin and end times
        # relative to gridsStartTime
        localtime = time.localtime(self._gridsStartTime.unixTime())
        localHour = localtime[3]
        if localtime[8]: # daylight
            maxBegin = 8
        else:
            maxBegin = 7
        self._MaxTBegin = maxBegin - localHour  # MaxT begins at 7 am standard time
        self._MaxTEnd = self._MaxTBegin + 13
        self._MinTBegin = self._MaxTBegin + 12
        self._MinTEnd = self._MaxTBegin + 12 + 14
        self._MinRHBegin = maxBegin - 4 - localHour  # MinRH begins at 3 am standard time
        self._MinRHEnd = self._MinRHBegin + 18
        self._MaxRHBegin = self._MinRHBegin + 12
        self._MaxRHEnd = self._MinRHBegin + 12 + 18

    def _translateHour(self, hour):
        # Evaluate a symbolic hour expression such as "MaxTBegin + 24" by
        # substituting the instance's *Begin/*End attributes, then eval'ing.
        # Non-string hours pass through unchanged.
        if type(hour) is not types.StringType:
            return hour
        # Suppose hour == "MaxTBegin + 24" and self._MaxTBegin == 1
        for tStr in ["MaxTBegin", "MaxTEnd", "MinTBegin", "MinTEnd",
                     "MaxRHBegin", "MaxRHEnd", "MinRHBegin", "MinRHEnd"]:
            if hour.find(tStr) >= 0:
                exec "tHour = self._" + tStr       # tHour = self._MaxTBegin
                hour = hour.replace(tStr, `tHour`) # hour == "1 + 24"
        exec "newHour = " + hour
        return newHour

    def _deleteGrids(self, entry):
        # Delete (and save/publish) each grid listed in entry["deleteGrids"].
        # "all" as a start or end hour wipes the element over all times.
        deleteGrids = entry.get("deleteGrids", None)
        if deleteGrids is None or deleteGrids == []:
            return
        self._lastCreateGrids = [] #clear it after deleting grids
        for gridEntry in deleteGrids:
            model, elementName, level, startHour, endHour = gridEntry
            if startHour == "all" or endHour == "all":
                timeRange = TimeRange.allTimes()
            else:
                gridsTR = TimeRange.TimeRange(self._gridsStartTime,
                                              self._gridsStartTime + 12 * 3600)
                timeRange = TimeRange.TimeRange(gridsTR.startTime() + startHour * 3600,
                                                gridsTR.startTime() + endHour * 3600)
            self.deleteGrid(model, elementName, level, timeRange)
            self.saveElements([elementName], model)
            if entry.get("publishGrids", 0):
                self.publishElements([elementName], timeRange)

    # Required if Message is a trigger
    def executeMsg(self, msg):
        # Dispatch each FINISHED process-status message to _doExecuteMsg.
        if self._process is None:
            return
        status = msg.status() #list of messages
        for msg in status:
            if msg.status() == AFPS.ProcessStatus.FINISHED:
                self._doExecuteMsg(msg) #call for each possible message

    # Performs the processing
    def _doExecuteMsg(self, name, fcst, entry, drtTime, state):
        # Evaluate one formatter run: apply checkMethod / checkStrings /
        # notCheckStrings, tally pass/fail, optionally decode VTEC, then clean up.
        if self._reportingMode not in ["Pretty"]:
            self.output("Calling TextProductTest Message Invoked " + `entry`, self._outFile)
        checkMethod = entry.get("checkMethod", None)
        checkStrings = entry.get("checkStrings", None)
        notCheckStrings = entry.get("notCheckStrings", None)
        orderStrings = entry.get("orderStrings", None)
        internalStrip = entry.get("internalStrip", 1)
        commentary = entry.get("commentary", None)
        if True:
            # Clean up fileChanges
            self._cleanUpFiles(entry)
            self._cleanUpWritableCopies(entry)
            self.output("\n----------------------------------------------", self._outFile)
            if self._reportingMode not in ["Pretty"]:
                self.output(name + " (Elapsed time:" + self._getElapsedTimeStr() + ")", self._outFile)
            else:
                self.output(name, self._outFile)
            if commentary is not None:
                self.output(commentary + "\n", self._outFile)
            self._scripts += 1
            if state.equals(ProductStateEnum.Failed):
                self.output("Formatter failed!", self._outFile)
                success = False
            else:
                # Look at results
                # If any of the check fails, the test fails
                check1 = 1
                check2 = 1
                check3 = 1
                if checkMethod is not None:
                    check1 = checkMethod(fcst)
                    if self._reportingMode not in ["Pretty"]:
                        if not check1:
                            failMsg = "CHECK METHOD FAILED:" + name
                            self.output(failMsg, self._outFile)
                        else:
                            self.output("CHECK METHOD PASSED: " + name, self._outFile)
                # Prepare results for string searches
                # NOTE(review): if fcst is None but checkStrings is set, fcstStr is
                # referenced before assignment below — potential NameError; confirm
                # callers never pass that combination.
                if fcst is not None:
                    fcstStr = fcst.replace("\n", " ")
                    fcstStrRaw = fcstStr
                    if internalStrip:
                        fcstStr = self.internalStrip(fcstStr)
                    fcstStr = fcstStr.replace("... ", "...")
                    fcstStrRaw = fcstStrRaw.replace("... ", "...")
                if checkStrings is not None:
                    check2 = self._checkStrs(name, fcst, checkStrings, orderStrings,
                                             fcstStr, fcstStrRaw, internalStrip)
                    if check2:
                        if self._reportingMode not in ["Pretty"]:
                            self.output("STRING SEARCHES PASSED ", self._outFile)
                if notCheckStrings is not None:
                    check3 = self._checkStrs(name, fcst, notCheckStrings, 0,
                                             fcstStr, fcstStrRaw, internalStrip, checkMode=0)
                    if check3:
                        if self._reportingMode not in ["Pretty"]:
                            self.output("'NOT' STRING SEARCHES PASSED ", self._outFile)
                success = check1 and check2 and check3
            if success:
                self._passed += 1
                logmsg = name + " Passed"
                self.statusBarMsg(logmsg, "R", category="ISC")
            else:
                self._failures += 1
                logmsg = name + " Failed"
                self.statusBarMsg(logmsg, "A", category="ISC")
            self.output(logmsg, self._outFile)
            if self._failures > self._failLimit:
                self._cleanUp(entry, drtTime)
                # Stop processing
                return
            if self._reportingMode in ["Verbose", "Moderate"]:
                if fcst and success:
                    # checkStrings failure will print out the product already
                    self.output("\n" + fcst, self._outFile)
            # DecodeVTEC if requested
            # Note for later: if in practice mode, set active
            # table to runVTECDecoder("PRACTICE", fcst)
            if success and entry.get("decodeVTEC", 0):
                self.__runVTECDecoder(fcst, drtTime)
                # wait until table has been modified or 5 seconds
                t1 = time.time();
                while not self.__listener.isReceivedNotification():
                    time.sleep(0.1)
                    if time.time() - t1 > 20:
                        self.output("Vtec Decoder timed out!", self._outFile)
                        break
                t2 = time.time();
                if self._reportingMode in ["Verbose", "Moderate"]:
                    self.output("Vtec Decoder wait time: " + "%6.2f" % (t2 - t1), self._outFile)
            self._cleanUp(entry, drtTime)

    def _cleanUp(self, entry, drtTime):
        # Reset the displaced-real-time offset (if one was used for this run).
        if drtTime is not None:
            import offsetTime
            offsetTime.reset()
            reload(offsetTime)
#        fileChanges = entry.get("fileChanges", [])
#        for fileName, fileType, changeType, strings, cleanUp in fileChanges:
#            fileName = fileName.replace("<site>", self.getSiteID())
#            reload(sys.modules[fileName])
#        productType = entry['productType']
#        if sys.modules.has_key(productType):
#            del sys.modules[productType]

    def _cleanUpWritableCopies(self, entry, user="GFETEST"):
        # Remove writable copies created for this entry (mirror of the creation
        # path above; the if 1 == 1 / else pair replaces a disabled try/except).
        writables = entry.get("writeableCopies", None)
        if writables is None:
            return
        for fileSrc, fileType, destFilename in writables:
            source = fileSrc.replace("<site>", self.getSiteID())
            dest = destFilename.replace("<site>", self.getSiteID())
            #try:
            if 1 == 1:
                TextFileUtil.makeWritableCopy(source, fileType, dest, True);
            #except:
            else:
                pass
            self.output("Cleanup writable copies: " + fileSrc, self._outFile)

    def _cleanUpFiles(self, entry):
        # Delete user-level override files created by _fileChanges, restoring
        # the original files, unless configured to leave changes in place.
        fileChanges = entry.get("fileChanges", [])
        for fileName, fileType, changeType, strings, cleanUp in fileChanges:
            fileName = fileName.replace("<site>", self.getSiteID())
            textFileID = TextFileUtil.getTextFile(fileName, fileType)
            if self._leaveFileChanges == "no":
                if cleanUp in ["delete", "undo"]:
                    # File changes are made as overrides at the GFETEST user level
                    # We just remove these files to restore the previous file
                    destLf = TextFileUtil.getUserTextFile(textFileID)
                    TextFileUtil.deleteTextFile(destLf)

    def _checkStrs(self, name, fcst, checkStrings, orderStrings, fcstStr,
                   fcstStrRaw, internalStrip, checkMode=1):
        # Check the fcstStr for the list of checkStrings
        # If a checkString is a tuple, at least one of the
        #   given tuple strings must be found in the fcstStr (or fcstStrRaw)
        # If orderStrings == 1, the strings must occur in order
        #   in the fcstStr
        # If checkMode == 0, the strings should NOT be found in the fcstStr
        # If internalStrip == 2, check both the fcstStr, and fcstStrRaw
        #   versions. If at least one succeeds, the checkString succeeds.
        curIndex = -1
        for cStr in checkStrings:
            if type(cStr) == types.TupleType:
                # Will pass if ANY of these strings are found
                # Not valid with checkMode of zero
                if not checkMode:
                    continue
                found = 0
                for subStr in cStr:
                    found, strIndex, strIndexFlag = self._checkStr(
                        subStr, fcstStr, fcstStrRaw, internalStrip)
                    if found:
                        if self._reportingMode in ["Verbose"]:
                            self.output("StringCHECK: " + subStr + ' ' + `strIndex`, self._outFile)
                        elif self._reportingMode in ["Pretty"]:
                            self.output("CHECK String: " + subStr, self._outFile)
                        break
                if not found:
                    self._failed(subStr, name, fcst, fcstStr, checkMode)
                    return 0
            # NOTE(review): method truncated in this chunk — the non-tuple branch
            # continues beyond the visible text.
            else:
                #
    = 0
    wind_resist = 0
    dexterity = 0
    # NOTE(review): the `def` line of this unit-stat command lies outside this
    # chunk; counters above accumulate the selected units' combined stats.

    #Set effects
    #Ray set
    #Conditions: Back, Arms, Legs  Boost: 60 DEX
    if( (back == "Back / Circuray" or back == "Back / Circunion") and
        (arms == 'Arms / Circaray' or arms == 'Arms / Circaunion') and
        (legs == 'Legs / Circuray' or legs == 'Legs / Circunion') ):
        dexterity += 60
    #Ophistia Set 1 [Austere set]
    #Conditions: Back and Arms, Boost: All atk 80, 50 dex, 3 ice res, 3 wind res, 3 light res, 50 HP, 20PP
    if (back == 'Back / Ofzeterious' and arms == 'Arms / Ofzende'):
        melee_power += 80
        range_power += 80
        tec_power += 80
        melee_defense += 100
        range_defense += 100
        tec_defense += 100
        hp += 50
        pp += 20
        ice_resist += 3
        wind_resist += 3
        light_resist += 3
    #Ophistia Set 2 [Austere set]
    #Conditions: Legs and Weapon, Boost: All Def + 100, 50 dex, 3 ice res, 3 wind res, 3 light res
    if(legs == 'Legs / Ofzetrogie'):
        await ctx.send("You said you were using 'Legs / Ofzetrogie'. Do you plan on using an Austere weapon too? Yes or No")
        austere = await AustereCheck(ctx)
        if austere == 'yes':
            dexterity += 50
            melee_defense += 100
            range_defense += 100
            tec_defense += 100
            ice_resist += 3
            wind_resist += 3
            light_resist +=3

    #Adds all the stats from stored in the unit dictionaries based on the user's unit selections, casts data as an int
    melee_power += int(back_dict[back]['MEL pwr']) + int(arms_dict[arms]['MEL pwr']) + int(legs_dict[legs]['MEL pwr'])
    range_power += int(back_dict[back]['RNG pwr']) + int(arms_dict[arms]['RNG pwr']) + int(legs_dict[legs]['RNG pwr'])
    tec_power += int(back_dict[back]['TEC pwr']) + int(arms_dict[arms]['TEC pwr']) + int(legs_dict[legs]['TEC pwr'])
    hp += int(back_dict[back]['HP']) + int(arms_dict[arms]['HP']) + int(legs_dict[legs]['HP'])
    pp += int(back_dict[back]['PP']) + int(arms_dict[arms]['PP']) + int(legs_dict[legs]['PP'])
    melee_defense += int(back_dict[back]['M DEF']) + int(arms_dict[arms]['M DEF']) + int(legs_dict[legs]['M DEF'])
    range_defense += int(back_dict[back]['R DEF']) + int(arms_dict[arms]['R DEF']) + int(legs_dict[legs]['R DEF'])
    tec_defense += int(back_dict[back]['T DEF']) + int(arms_dict[arms]['T DEF']) + int(legs_dict[legs]['T DEF'])
    melee_resist += int(back_dict[back]['M RES']) + int(arms_dict[arms]['M RES']) + int(legs_dict[legs]['M RES'])
    range_resist += int(back_dict[back]['R RES']) + int(arms_dict[arms]['R RES']) + int(legs_dict[legs]['R RES'])
    tec_resist += int(back_dict[back]['T RES']) + int(arms_dict[arms]['T RES']) + int(legs_dict[legs]['T RES'])
    light_resist += int(back_dict[back]['Light RES']) + int(arms_dict[arms]['Light RES']) + int(legs_dict[legs]['Light RES'])
    dark_resist += int(back_dict[back]['Dark RES']) + int(arms_dict[arms]['Dark RES']) + int(legs_dict[legs]['Dark RES'])
    fire_resist += int(back_dict[back]['Fire RES']) + int(arms_dict[arms]['Fire RES']) + int(legs_dict[legs]['Fire RES'])
    ice_resist += int(back_dict[back]['Ice RES']) + int(arms_dict[arms]['Ice RES']) + int(legs_dict[legs]['Ice RES'])
    lightning_resist += int(back_dict[back]['Lightning RES']) + int(arms_dict[arms]['Lightning RES']) + int(legs_dict[legs]['Lightning RES'])
    wind_resist += int(back_dict[back]['Wind RES']) + int(arms_dict[arms]['Wind RES']) + int(legs_dict[legs]['Wind RES'])
    dexterity += int(back_dict[back]['DEX']) + int(arms_dict[arms]['DEX']) + int(legs_dict[legs]['DEX'])

    await ctx.send("Alright {}! I've finally added up all the numbers!".format(ctx.author.display_name))
    response = "**({}) ({}) ({})**```\nMEL PWR: {}\nRNG PWR: {}\nTEC PWR: {}\nHP: {}\nPP: {}\n\nMEL DEF: {}\nRNG DEF: {}\nTEC DEF: {}\n\nMEL RES: {}\nRNG RES: {}\nTEC RES: {}\nLight RES: {}\nDark RES: {}\nFire RES: {}\nIce RES: {}\nLightning RES: {}\nWind RES: {}\n\nDEX: {}\n```".format(back_dict[back]['Unit'], arms_dict[arms]['Unit'], legs_dict[legs]['Unit'], melee_power, range_power, tec_power, hp, pp, melee_defense, range_defense, tec_defense, melee_resist, range_resist, tec_resist, light_resist, dark_resist, fire_resist, ice_resist, lightning_resist, wind_resist, dexterity)
    await ctx.send(response)

#************** host() ****************
#User command to organize/gather signups for guild events
#Prompts the user for the guild, event name, party type and event start time with fun dialogue.
#This command is can only be used in private messages to avoid clutter on guild chat channels
#Notes: This command was the reason this bot was created. The public PSO2 bot that was offered [Matoi-chan] also had a signup system
#The usage of that bot's event hosting command was very rigid/unforgiving to mistakes and did not give good feedback/recovery. So Hoshii was orignally made to supplement what Matoi lacked for event hosting
@client.command(description='Host an alliance event using Hoshii-kun\'s built in signup system. You will be prompted via DMs for the event\'s name, party size and time/timezone. Event signup will be posted on the alliance events channel. For event times, you may use relative times such as "in 20 minutes or tomorrow at 12 EDT" or alternatively, a full date such as "December 25th at 1:00PM EDT"')
@commands.max_concurrency(1, per=BucketType.user, wait=False)
async def host(ctx, *args):
    # DM-only flow: prompt for guild, event name, party size and date, then
    # publish the event post and schedule a 15-minute-before reminder.
    if not isinstance(ctx.channel, discord.channel.DMChannel):
        await ctx.author.send("Hi {}! Hosting is only done from DMs, so feel free to type /host again here to get started".format(ctx.author.display_name))
        return
    await ctx.author.send("Hi {}! So I hear you wanted to run an event huh?".format(ctx.author.display_name))
    await ctx.author.send("...What? You wanted my help organizing it?!? Well okay, but only because it's you who asked ♡")

    #Guild
    guild = await getGuildSelection(ctx)
    if guild == "Timeout" or guild is None:
        return
    await ctx.author.send("Wow! The folks at {} sure are lucky to have you {}. Organizing events is hard work ya'know?".format(guild.name, ctx.author.display_name))

    #Event Name
    await ctx.author.send("\nNow then, let's get started! What would you like the event to be named?")
    eventName = await getEventName(ctx)
    if eventName == "Timeout" or eventName is None:
        return
    await ctx.author.send("{}? {}! That's a nice name!".format(eventName.content, ctx.author.display_name))

    #Party Type
    await ctx.author.send("Now, is this for a party of 4, a multiparty of 8, a multiparty of 12 or an unlimited party?")
    partyType = await getPartyType(ctx)
    if partyType == "Timeout" or partyType is None:
        return
    await ctx.author.send("{} people, got it {}!".format(partyType.content, ctx.author.display_name))

    #Date
    await ctx.author.send("Now, can you tell me when your event will be? You can tell me the date, time and timezone.")
    date = await getEventDate(ctx)
    if date == "Timeout" or date is None:
        return
    await ctx.author.send("Got it {}! Your event's time will be {}".format(ctx.author.display_name, date))

    # prefer the member's guild nickname; fall back to their display name
    host = guild.get_member(ctx.author.id).nick
    if host is None:
        host = guild.get_member(ctx.author.id).display_name

    await ctx.author.send("Here's what I have:```\n[Event Name] {}\n[Hosted by] {}\n[Party size] {}\n[Date/Time] {}\n```".format(eventName.content, host, partyType.content, date))
    await ctx.author.send("I'll go tell the others now! You can count on me {} ❤️".format(ctx.author.display_name))

    event = GuildEvent(guild, eventName.content, partyType.content, date, host)
    await event.listEventInfo()
    event_ID = await event.shareEvent()
    # schedule the reminder 15 minutes before start (times normalized to UTC)
    reminder_time = event.eventTime.astimezone(pytz.utc) - timedelta(minutes = 15)
    notifier.add_job(notify, 'date', run_date=reminder_time, args = [event_ID])

#************** loadAllEventNotifs() ****************
#When Hoshii boots up, Hoshii goes through the servers he is a member of and looks for recent posts he made in 'event-hosting'
#For posts he created, he grabs the message ID and tries to see if it's associated with an event ID [event IDs are stored as the event post's message ID]
#If there is an event post and event match inside the DB, we assume the event is still going on because the event post was not deleted.
#We then add the event to the notifier scheduler so we can notify signups when the event is about to start.
async def loadAllEventNotifs():
    for guild in client.guilds:
        channels = guild.text_channels
        for i in channels:
            if(i.name) == ('event-hosting'):
                event_channel = i
                async for message in event_channel.history(limit = 100):
                    if message.author == client.user:
                        event = await loadEvent(message.id)
                        # NOTE(review): loadEvent may return None for stale posts —
                        # the attribute access below would then raise; confirm.
                        reminder_time = event.eventTime.astimezone(pytz.utc) - timedelta(minutes = 15)
                        notifier.add_job(notify, 'date', run_date=reminder_time, args = [event.event_ID])
    #notifier.print_jobs()

#************** on_raw_reaction_add ****************
#This function handles reactions to event posts
#Reacting with the heart emoji signs the reacting person up for the event
#Reacting with the clock emoji sends the reacting person with a list of timezone conversions for the event start time
@client.event
async def on_raw_reaction_add(payload):
    #Initializes fn with reaction information
    message_id = payload.message_id
    emoji = payload.emoji
    channel = client.get_channel(payload.channel_id)
    guild = client.get_guild(payload.guild_id)
    member = guild.get_member(payload.user_id)

    #If reaction is from a bot, exit
    if member.bot:
        return
    #If channel doesn't register, exit
    if channel is None:
        return
    #Grabs the message id from the reacted message
    message = await channel.fetch_message(message_id)
    #Checks if the message ID being reacted to is associated with an event, if it isn't, exit
    event = await loadEvent(message_id)
    if event is None:
        return
    #If the reaction is this clock emoji, send event's timezone conversions to the user reacting
    if str(emoji) == '🕒':
        timezones = ['US/Eastern', 'US/Central', 'US/Mountain', 'US/Pacific', 'US/Hawaii', 'US/Alaska', 'Brazil/East', 'Europe/Brussels', 'Europe/Madrid', 'Europe/London', 'Europe/Paris', 'Europe/Rome', 'Europe/Berlin', 'Europe/Budapest', 'Europe/Bucharest', 'Europe/Moscow', 'Europe/Kiev', 'Asia/Ho_Chi_Minh', 'Asia/Jakarta', 'Asia/Manila', 'Asia/Seoul', 'Asia/Shanghai', 'Australia/Queensland', 'Australia/Broken_Hill', 'Australia/West']
        # NOTE(review): handler truncated in this chunk — the member.send(...) call
        # and the remainder of the function continue beyond the visible text.
        await member.send("Hi {}! I heard that you were interested in some of {}'s timezones. I have prepared a list of some common timezones just for you ️❤️".format(member.display_name,
        = expected_obj if hvd.rank() == 0 else {}
        # NOTE(review): the `def` line of this broadcast-object test lies outside
        # this chunk; rank 0 broadcasts and all ranks must receive the same dict.
        obj = hvd.broadcast_object(obj, root_rank=0)
        self.assertDictEqual(obj, expected_obj)

    def test_allgather_object(self):
        """Gather a per-rank dict from every worker; rank 1 adds an extra key."""
        hvd.init()

        d = {'metric_val_1': hvd.rank()}
        if hvd.rank() == 1:
            d['metric_val_2'] = 42

        results = hvd.allgather_object(d)

        expected = [{'metric_val_1': i} for i in range(hvd.size())]
        if hvd.size() > 1:
            expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}

        self.assertEqual(len(results), hvd.size())
        self.assertListEqual(results, expected)

    def test_compression_fp16(self):
        """fp16 compression halves float dtypes and round-trips losslessly for
        a tensor of ones; integer dtypes must pass through uncompressed."""
        valid_dtypes = [torch.float32, torch.float64]
        invalid_dtypes = [torch.uint8, torch.int8, torch.int16,
                          torch.int32, torch.int64]

        tensor_size = [5] * 3
        compression = hvd.Compression.fp16

        for dtype in valid_dtypes:
            tensor = torch.ones(tensor_size, dtype=dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            self.assertEqual(tensor_compressed.dtype, torch.float16)

            tensor_decompressed = compression.decompress(tensor_compressed, ctx)
            self.assertEqual(tensor_decompressed.dtype, dtype)

            expected = np.ones(tensor_size)
            err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
            self.assertLess(err, 0.00000001)

        for dtype in invalid_dtypes:
            tensor = torch.ones(tensor_size, dtype=dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            self.assertEqual(tensor_compressed.dtype, dtype)

            tensor_decompressed = compression.decompress(tensor_compressed, ctx)
            self.assertEqual(tensor_decompressed.dtype, dtype)

            if dtype != torch.int8:
                # Cannot cast to NumPy with a CharTensor
                expected = np.ones(tensor_size)
                err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
                self.assertLess(err, 0.00000001)

    def test_force_allreduce(self):
        """Test that allreduce is forced on all gradients during opt.step()."""
        hvd.init()
        rank = hvd.rank()
        size = hvd.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")

        N, D_in, H, D_out = 64, 100, 10, 10
        x = torch.randn(N, D_in).requires_grad_()
        y = torch.randn(N, D_out).requires_grad_()

        def new_optimizer(cls, opt_params, model):
            # keep only the kwargs this optimizer's __init__ actually accepts
            p = {
                k: v for k, v in opt_params.items()
                if k in inspect.getargspec(cls.__init__).args
            }
            return cls(model.parameters(), **p)

        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.fc1 = torch.nn.Linear(D_in, H)
                self.fc2 = torch.nn.Linear(H, D_out)
                self.fc3 = torch.nn.Linear(D_out, D_out)

            def forward(self, x_):
                x_ = F.relu(self.fc1(x_))
                x1_ = self.fc2(x_)
                x2_ = self.fc3(F.relu(x1_))
                return x1_, x2_

        def create_model(opt_class, opt_params):
            model = Net()
            hvd.broadcast_parameters(model.state_dict(), root_rank=0)
            opt = new_optimizer(opt_class, opt_params, model)
            opt = hvd.DistributedOptimizer(
                opt, named_parameters=model.named_parameters())
            return model, opt

        # L-BFGS is currently unsupported, as are sparse tensors, which are
        # required by SparseAdam optimizer
        optimizers = [
            (subclass.__name__, subclass)
            for subclass in torch.optim.Optimizer.__subclasses__()
            if subclass.__module__.startswith('torch.optim') and
               subclass != torch.optim.LBFGS and
               subclass != torch.optim.SparseAdam
        ]
        optimizers.sort(key=lambda tup: tup[0])

        opt_params_list = [
            dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
            dict(lr=0.2)
        ]

        for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
            model, optimizer = create_model(opt_class, opt_params)
            y_pred1, y_pred2 = model(x)
            # different ranks deliberately backprop through different heads
            if rank == 0:
                loss = F.mse_loss(y_pred1, y, size_average=False)
            else:
                loss = F.mse_loss(y_pred2, y, size_average=False)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    def test_model_parallelism(self):
        """Test that tensors on different GPUs are supported."""
        # Only do this test if there are GPUs available.
        if not torch.cuda.is_available():
            self.skipTest("No GPUs available")

        hvd.init()
        local_rank = hvd.local_rank()
        size = hvd.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")

        # Skip the test if there are not enough GPUs.
        if torch.cuda.device_count() < hvd.local_size() * 2:
            self.skipTest("Not enough GPUs available")

        first_device = local_rank * 2
        second_device = local_rank * 2 + 1

        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                # Place parts of model on different GPUs.
                self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(first_device)
                self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(second_device)

            def forward(self, x):
                x = x.cuda(first_device)
                x = self.conv1(x)
                x = x.cuda(second_device)
                x = self.conv2(x)
                return x

        model = Net()
        inp = torch.rand([1, 1, 1000, 1000])

        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())

        loss = model(inp).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()

    def test_delta_optimizer(self):
        """Test that delta optimizer."""
        hvd.init()
        # TODO support non-MPI Adasum operation
        # Only do this test if there are GPUs available.
        if not hvd.mpi_enabled() or not torch.cuda.is_available():
            self.skipTest("No GPUs available")

        local_rank = hvd.local_rank()
        size = hvd.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")

        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(local_rank)
                self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(local_rank)

            def forward(self, x):
                x = x.cuda(local_rank)
                x = self.conv1(x)
                x = x.cuda(local_rank)
                x = self.conv2(x)
                return x

        model = Net()
        inp = torch.rand([1, 1, 1000, 1000])

        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters(), op=hvd.Adasum)

        loss = model(inp).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()

    def test_duplicate_names(self):
        """Test that passing duplicate names to optimizer will fail."""
        net1 = torch.nn.Conv2d(1, 1, 1)
        net2 = torch.nn.Conv2d(1, 1, 1)

        parameters = itertools.chain(net1.parameters(), net2.parameters())
        opt = torch.optim.SGD(parameters, lr=0.1)

        # This will have duplicate names, since both net1 and net2 have 'weight' and 'bias'
        named_parameters = itertools.chain(net1.named_parameters(), net2.named_parameters())

        try:
            hvd.DistributedOptimizer(opt, named_parameters=named_parameters)
            assert False, 'hvd.DistributedOptimizer did not throw error'
        except ValueError:
            pass

    def test_dynamic_requires_grad(self):
        """Test that makes sure that gradients can be turned off/on dynamically."""
        hvd.init()
        size = hvd.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")

        gen = torch.nn.Conv2d(1, 10, 1)
        disc = torch.nn.Conv2d(10, 1, 1)
        inp = torch.rand([1, 1, 100, 100])

        gen_opt = torch.optim.SGD(gen.parameters(), lr=0.1)
        gen_opt = hvd.DistributedOptimizer(gen_opt, named_parameters=gen.named_parameters())

        disc_opt = torch.optim.SGD(disc.parameters(), lr=0.1)
        disc_opt = hvd.DistributedOptimizer(disc_opt, named_parameters=disc.named_parameters())

        def train_step(train_generator=False, train_discriminator=False):
            # toggle which sub-network participates in this step
            for p in gen.parameters():
                p.requires_grad_(train_generator)
            for p in disc.parameters():
                p.requires_grad_(train_discriminator)

            gen_opt.zero_grad()
            disc_opt.zero_grad()

            loss = disc(gen(inp)).sum()
            loss.backward()

            for p in gen.parameters():
                assert train_generator == (p.grad is not None and p.grad.max().is_nonzero()), \
                    'Gradient for generator is zero but it should be trained or vice versa.'
            for p in disc.parameters():
                assert train_discriminator == (p.grad is not None and p.grad.max().is_nonzero()), \
                    'Gradient for discriminator is zero but it should be trained or vice versa.'

            if train_generator:
                gen_opt.step()
            if train_discriminator:
                disc_opt.step()

        for x in range(10):
            # Step 1: train generator.
            train_step(train_generator=True)

            # Step 2: train discriminator.
            train_step(train_discriminator=True)

    def test_gradient_clipping(self):
        """Test gradient clipping example."""
        hvd.init()
        size = hvd.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")

        x = torch.ones(1, 1).requires_grad_()
        y = torch.ones(1, 1).requires_grad_()

        model = torch.nn.Linear(1, 1)
        model.weight = torch.nn.Parameter(torch.zeros(1, 1) + 0.5)
        model.bias = torch.nn.Parameter(torch.zeros(1))
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        optimizer = hvd.DistributedOptimizer(
            optimizer, named_parameters=model.named_parameters())

        y_pred = model(x)
        loss = F.mse_loss(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        # synchronize() before clipping so clipping sees the allreduced grads
        optimizer.synchronize()
        prior_grad = model.weight.grad.item()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        clipped_grad = model.weight.grad.item()
        assert abs(prior_grad) > abs(clipped_grad)
        with optimizer.skip_synchronize():
            optimizer.step()

    def test_synchronize_step_warning(self):
        """
        Test that .synchronize() followed by .step() without
        optimizer.skip_synchronize() context will produce a warning.
        """
        hvd.init()
        size = hvd.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")

        x = torch.zeros(1, 1).requires_grad_()
        y = torch.ones(1, 1).requires_grad_()

        model = torch.nn.Linear(1, 1)
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        optimizer = hvd.DistributedOptimizer(
            optimizer, named_parameters=model.named_parameters())

        y_pred = model(x)
        loss = F.mse_loss(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.synchronize()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        with warnings.catch_warnings(record=True) as ws:
            optimizer.step()
            assert len(ws) == 1
            assert 'optimizer.step() called without optimizer.skip_synchronize()' \
                in str(ws[0].message)

    def test_no_named_parameters(self):
        """Test that leaving the default named_parameters=None will not throw an error."""
        hvd.init()

        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = torch.nn.Conv2d(1, 100, 1)
                self.conv2 = torch.nn.Conv2d(100, 1, 1)

            def forward(self, x):
                x = self.conv1(x)
                x = self.conv2(x)
                return x

        model = Net()
        inp = torch.rand([1, 1, 1000, 1000])

        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        opt = hvd.DistributedOptimizer(opt)

        loss = model(inp).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()

    def test_missing_named_parameters(self):
        """Test that naming half of the model parameters will throw an error."""
        hvd.init()

        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = torch.nn.Conv2d(1, 100, 1)
                self.conv2 = torch.nn.Conv2d(100, 1, 1)

            def forward(self, x):
                x = self.conv1(x)
                x = self.conv2(x)
                return x

        model = Net()
        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        try:
            hvd.DistributedOptimizer(opt,
                                     named_parameters=list(model.named_parameters())[0:1])
            assert False, 'hvd.DistributedOptimizer did not throw error'
        except ValueError:
            pass

    def test_horovod_join_allreduce(self):
        """Test Join op with allreduce."""
        # NOTE(review): this test is truncated in this chunk — the body continues
        # beyond the visible text.
        hvd.init()
        rank = hvd.rank()
        size = hvd.size()

        dtypes =
[torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor] if torch.cuda.is_available(): dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor, torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor] integral_types = [torch.IntTensor, torch.LongTensor, torch.cuda.IntTensor, torch.cuda.LongTensor] dims = [1, 2, 3] first_join_ranks = [0, 1] cachings = [False, True] for dtype, dim, first_join_rank, caching in itertools.product(dtypes, dims, first_join_ranks, cachings): torch.manual_seed(1234) def div(t, s): if _1_5_api and dtype in integral_types: return t.floor_divide(s) return t / s # Use two tensors to test fusion tensor_a = torch.FloatTensor(*([5] * dim)).random_(-100, 100) tensor_a = self.cast_and_place(tensor_a, dtype) tensor_b = torch.FloatTensor(*([17] * dim)).random_(-100, 100) tensor_b = self.cast_and_place(tensor_b, dtype) if caching: handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True) handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True) averaged_a = hvd.synchronize(handle_a) averaged_b = hvd.synchronize(handle_b) if rank == first_join_rank: if dtype.is_cuda: ret
import logging import os import re import uuid from io import BytesIO from json import load from pathlib import Path from random import randint from time import sleep from typing import Any, Dict, Generator, List, TYPE_CHECKING, Union from urllib.parse import urljoin import httpx import jsonschema from httpx import HTTPStatusError from httpx import Timeout, URL from gcapi.apibase import APIBase, ClientInterface, ModifiableMixin from gcapi.sync_async_hybrid_support import CapturedCall, mark_generator logger = logging.getLogger(__name__) def is_uuid(s): try: uuid.UUID(s) except ValueError: return False else: return True def accept_tuples_as_arrays(org): return org.redefine( "array", lambda checker, instance: isinstance(instance, tuple) or org.is_type(instance, "array"), ) Draft7ValidatorWithTupleSupport = jsonschema.validators.extend( jsonschema.Draft7Validator, type_checker=accept_tuples_as_arrays( jsonschema.Draft7Validator.TYPE_CHECKER ), ) def import_json_schema(filename): """ Loads a json schema from the module's subdirectory "schemas". This is not *really* an import but the naming indicates that an ImportError is raised in case the json schema cannot be loaded. This should also only be called while the module is loaded, not at a later stage, because import errors should be raised straight away. Parameters ---------- filename: str The jsonschema file to be loaded. The filename is relative to the "schemas" directory. Returns ------- Draft7Validator The jsonschema validation object Raises ------ ImportError Raised if the json schema cannot be loaded. 
""" filename = os.path.join( os.path.dirname(os.path.abspath(__file__)), "schemas", filename ) try: with open(filename) as f: jsn = load(f) return Draft7ValidatorWithTupleSupport( jsn, format_checker=jsonschema.draft7_format_checker ) except (OSError, ValueError) as e: # I want missing/failing json imports to be an import error because that # is what they should indicate: a "broken" library raise ImportError( "Json schema '{file}' cannot be loaded: {error}".format( file=filename, error=e ) ) from e class ImagesAPI(APIBase): base_path = "cases/images/" def download( self, *, filename: Union[str, Path], # extension is added automatically image_type: str = None, # restrict download to a particular image type pk=None, url=None, files=None, **params, ): if len([p for p in (pk, url, files, params) if p]) != 1: raise ValueError( "Exactly one of pk, url, files or params must be specified" ) # Retrieve details of the image if needed if files is None: if pk is not None: image = yield from self.detail(pk=pk) elif url is not None: image = yield self.yield_request(method="GET", url=url) self.verify_against_schema(image) else: image = yield from self.detail(**params) files = image["files"] # Make sure file destination exists p = Path(filename).absolute() directory = p.parent directory.mkdir(parents=True, exist_ok=True) basename = p.name # Download the files downloaded_files = [] for file in files: if image_type and file["image_type"] != image_type: continue data = ( yield self.yield_request( method="GET", url=file["file"], follow_redirects=True ) ).content suffix = file["file"].split(".")[-1] local_file = directory / f"{basename}.{suffix}" with local_file.open("wb") as fp: fp.write(data) downloaded_files.append(local_file) return downloaded_files class UploadSessionsAPI(ModifiableMixin, APIBase): base_path = "cases/upload-sessions/" class WorkstationSessionsAPI(APIBase): base_path = "workstations/sessions/" class ReaderStudyQuestionsAPI(APIBase): base_path = 
"reader-studies/questions/" class ReaderStudyMineAnswersAPI(ModifiableMixin, APIBase): base_path = "reader-studies/answers/mine/" validation_schemas = {"GET": import_json_schema("answer.json")} class ReaderStudyAnswersAPI(ModifiableMixin, APIBase): base_path = "reader-studies/answers/" validation_schemas = { "GET": import_json_schema("answer.json"), "POST": import_json_schema("post-answer.json"), } sub_apis = {"mine": ReaderStudyMineAnswersAPI} mine = None # type: ReaderStudyMineAnswersAPI def _process_request_arguments(self, method, data): if is_uuid(data.get("question", "")): data["question"] = str( self._client.base_url.join( ReaderStudyQuestionsAPI.base_path ).join(data["question"] + "/") ) return ModifiableMixin._process_request_arguments(self, method, data) class ReaderStudiesAPI(APIBase): base_path = "reader-studies/" validation_schemas = {"GET": import_json_schema("reader-study.json")} sub_apis = { "answers": ReaderStudyAnswersAPI, "questions": ReaderStudyQuestionsAPI, } answers = None # type: ReaderStudyAnswersAPI questions = None # type: ReaderStudyQuestionsAPI def ground_truth(self, pk, case_pk): return ( yield self.yield_request( method="GET", path=urljoin( self.base_path, pk + "/ground-truth/" + case_pk + "/" ), ) ) class AlgorithmsAPI(APIBase): base_path = "algorithms/" class AlgorithmResultsAPI(APIBase): base_path = "algorithms/results/" class AlgorithmJobsAPI(ModifiableMixin, APIBase): base_path = "algorithms/jobs/" @mark_generator def by_input_image(self, pk): yield from self.iterate_all(params={"image": pk}) class ArchivesAPI(APIBase): base_path = "archives/" class RetinaLandmarkAnnotationSetsAPI(ModifiableMixin, APIBase): base_path = "retina/landmark-annotation/" validation_schemas = { "GET": import_json_schema("landmark-annotation.json"), "POST": import_json_schema("post-landmark-annotation.json"), } def for_image(self, pk): result = yield self.yield_request( method="GET", path=self.base_path, params={"image_id": pk} ) for i in result: 
self.verify_against_schema(i) return result class RetinaPolygonAnnotationSetsAPI(ModifiableMixin, APIBase): base_path = "retina/polygon-annotation-set/" validation_schemas = { "GET": import_json_schema("polygon-annotation.json"), "POST": import_json_schema("post-polygon-annotation.json"), } class RetinaSinglePolygonAnnotationsAPI(ModifiableMixin, APIBase): base_path = "retina/single-polygon-annotation/" validation_schemas = { "GET": import_json_schema("single-polygon-annotation.json"), "POST": import_json_schema("post-single-polygon-annotation.json"), } class RetinaETDRSGridAnnotationsAPI(ModifiableMixin, APIBase): base_path = "retina/etdrs-grid-annotation/" validation_schemas = { "GET": import_json_schema("etdrs-annotation.json"), "POST": import_json_schema("post-etdrs-annotation.json"), } class UploadsAPI(APIBase): base_path = "uploads/" chunk_size = 32 * 1024 * 1024 n_presigned_urls = 5 # number of pre-signed urls to generate max_retries = 10 def create(self, *, filename): return ( yield self.yield_request( method="POST", path=self.base_path, json={"filename": str(filename)}, ) ) def generate_presigned_urls(self, *, pk, s3_upload_id, part_numbers): url = urljoin( self.base_path, f"{pk}/{s3_upload_id}/generate-presigned-urls/" ) return ( yield self.yield_request( method="PATCH", path=url, json={"part_numbers": part_numbers} ) ) def abort_multipart_upload(self, *, pk, s3_upload_id): url = urljoin( self.base_path, f"{pk}/{s3_upload_id}/abort-multipart-upload/" ) return (yield self.yield_request(method="PATCH", path=url)) def complete_multipart_upload(self, *, pk, s3_upload_id, parts): url = urljoin( self.base_path, f"{pk}/{s3_upload_id}/complete-multipart-upload/" ) return ( yield self.yield_request( method="PATCH", path=url, json={"parts": parts} ) ) def list_parts(self, *, pk, s3_upload_id): url = urljoin(self.base_path, f"{pk}/{s3_upload_id}/list-parts/") return (yield self.yield_request(path=url)) def upload_fileobj(self, *, fileobj, filename): user_upload = 
yield from self.create(filename=filename) pk = user_upload["pk"] s3_upload_id = user_upload["s3_upload_id"] try: parts = yield from self._put_fileobj( fileobj=fileobj, pk=pk, s3_upload_id=s3_upload_id ) except Exception: yield from self.abort_multipart_upload( pk=pk, s3_upload_id=s3_upload_id ) raise return ( # noqa: B901 yield from self.complete_multipart_upload( pk=pk, s3_upload_id=s3_upload_id, parts=parts ) ) def _put_fileobj(self, *, fileobj, pk, s3_upload_id): part_number = 1 # s3 uses 1-indexed chunks presigned_urls = {} parts = [] while True: chunk = fileobj.read(self.chunk_size) if not chunk: break if str(part_number) not in presigned_urls: presigned_urls.update( ( yield from self._get_next_presigned_urls( pk=pk, s3_upload_id=s3_upload_id, part_number=part_number, ) ) ) response = yield from self._put_chunk( chunk=chunk, url=presigned_urls[str(part_number)] ) parts.append( {"ETag": response.headers["ETag"], "PartNumber": part_number} ) part_number += 1 return parts def _get_next_presigned_urls(self, *, pk, s3_upload_id, part_number): response = yield from self.generate_presigned_urls( pk=pk, s3_upload_id=s3_upload_id, part_numbers=[ *range(part_number, part_number + self.n_presigned_urls) ], ) return response["presigned_urls"] def _put_chunk(self, *, chunk, url): num_retries = 0 e = Exception if isinstance(chunk, BytesIO): chunk = chunk.read() while num_retries < self.max_retries: try: result = yield self.yield_request.request( method="PUT", url=url, content=chunk ) break except HTTPStatusError as _e: status_code = _e.response.status_code if status_code in [409, 423] or status_code >= 500: num_retries += 1 e = _e sleep((2 ** num_retries) + (randint(0, 1000) / 1000)) else: raise else: raise e return result class WorkstationConfigsAPI(APIBase): base_path = "workstations/configs/" def _generate_auth_header(token: str = "") -> Dict: if not token: try: token = str(os.environ["GRAND_CHALLENGE_AUTHORIZATION"]) except KeyError: raise RuntimeError("Token must be 
set") from None token = re.sub(" +", " ", token) token_parts = token.strip().split(" ") if len(token_parts) not in [1, 2]: raise RuntimeError("Invalid token format") return {"Authorization": f"BEARER {token_parts[-1]}"} class ApiDefinitions: images: ImagesAPI reader_studies: ReaderStudiesAPI sessions: WorkstationSessionsAPI uploads: UploadsAPI algorithms: AlgorithmsAPI algorithm_results: AlgorithmResultsAPI algorithm_jobs: AlgorithmJobsAPI archives: ArchivesAPI workstation_configs: WorkstationConfigsAPI retina_landmark_annotations: RetinaLandmarkAnnotationSetsAPI retina_polygon_annotation_sets: RetinaPolygonAnnotationSetsAPI retina_single_polygon_annotations: RetinaSinglePolygonAnnotationsAPI retina_etdrs_grid_annotations: RetinaETDRSGridAnnotationsAPI raw_image_upload_sessions: UploadSessionsAPI class ClientBase(ApiDefinitions, ClientInterface): # Make MyPy happy, this is a mixin now, so the dependent values will # come in through a side-channel if TYPE_CHECKING: _Base = httpx.Client _api_meta: ApiDefinitions __org_api_meta: ApiDefinitions def __init__( self, init_base_cls, token: str = "", base_url: str = "https://grand-challenge.org/api/v1/", verify: bool = True, timeout: float = 60.0, ): init_base_cls.__init__( self, verify=verify, timeout=Timeout(timeout=timeout) ) self.headers.update({"Accept": "application/json"}) self._auth_header = _generate_auth_header(token=token) self.base_url = URL(base_url) if self.base_url.scheme.lower() != "https": raise RuntimeError("Base URL must be https") self._api_meta = ApiDefinitions() self.__org_api_meta = ApiDefinitions() for name, cls in self._api_meta.__annotations__.items(): setattr(self._api_meta, name, cls(client=self)) setattr(self.__org_api_meta, name, cls(client=self)) def __getattr__(self, item): api = getattr(self._api_meta, item, None) if api: return api else: raise AttributeError( f"'ClientBase' has no function or API '{item}'" ) def validate_url(self, url): url = URL(url) if not url.scheme == "https" or 
url.netloc != self.base_url.netloc: raise RuntimeError(f"Invalid target URL: {url}") def __call__( self, method="GET", url="", path="", params=None, json=None, extra_headers=None, files=None, data=None, follow_redirects=False, ) -> Generator[CapturedCall, Any, Any]: if url: url = URL(url) else: url = self.base_url.join(path) if extra_headers is None: extra_headers = {} if json is not None: extra_headers["Content-Type"] = "application/json" self.validate_url(url) response = yield CapturedCall( func=self.request, args=(), kwargs={ "method": method, "url": str(url), "files": {} if files is None else files, "data": {} if data is None else data, "headers": { **self.headers, **self._auth_header, **extra_headers, }, "params": {} if params is None else params, "json": json, "follow_redirects": follow_redirects, }, ) try: response.raise_for_status() except HTTPStatusError as e: if e.response.headers.get("Content-Type") == "application/json": message = e.response.json() logger.error(f"{method} request to {url} failed: {message}") raise if response.headers.get("Content-Type") == "application/json": return response.json() else: return response def _upload_files(self, *, files, **kwargs): uploads = [] for file in files: with open(file, "rb") as f: uploads.append( ( yield from self.__org_api_meta.uploads.upload_fileobj( fileobj=f, filename=file.name ) )
<reponame>miticojo/core """Base class for common speaker tasks.""" from __future__ import annotations import asyncio from collections.abc import Coroutine import contextlib import datetime from functools import partial import logging from typing import Any, Callable import urllib.parse import async_timeout from pysonos.alarms import get_alarms from pysonos.core import MUSIC_SRC_LINE_IN, MUSIC_SRC_RADIO, MUSIC_SRC_TV, SoCo from pysonos.data_structures import DidlAudioBroadcast from pysonos.events_base import Event as SonosEvent, SubscriptionBase from pysonos.exceptions import SoCoException from pysonos.music_library import MusicLibrary from pysonos.snapshot import Snapshot from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN from homeassistant.components.media_player import DOMAIN as MP_DOMAIN from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import entity_registry as ent_reg from homeassistant.helpers.dispatcher import ( async_dispatcher_send, dispatcher_connect, dispatcher_send, ) from homeassistant.util import dt as dt_util from .const import ( BATTERY_SCAN_INTERVAL, DATA_SONOS, DOMAIN, PLATFORMS, SCAN_INTERVAL, SEEN_EXPIRE_TIME, SONOS_ALARM_UPDATE, SONOS_CREATE_ALARM, SONOS_CREATE_BATTERY, SONOS_CREATE_MEDIA_PLAYER, SONOS_ENTITY_CREATED, SONOS_ENTITY_UPDATE, SONOS_GROUP_UPDATE, SONOS_SEEN, SONOS_STATE_PLAYING, SONOS_STATE_TRANSITIONING, SONOS_STATE_UPDATED, SOURCE_LINEIN, SOURCE_TV, SUBSCRIPTION_TIMEOUT, ) from .favorites import SonosFavorites from .helpers import soco_error EVENT_CHARGING = { "CHARGING": True, "NOT_CHARGING": False, } UNAVAILABLE_VALUES = {"", "NOT_IMPLEMENTED", None} _LOGGER = logging.getLogger(__name__) def fetch_battery_info_or_none(soco: SoCo) -> dict[str, Any] | None: """Fetch battery_info from the given SoCo object. 
Returns None if the device doesn't support battery info or if the device is offline. """ with contextlib.suppress(ConnectionError, TimeoutError, SoCoException): return soco.get_battery_info() def _timespan_secs(timespan: str | None) -> None | float: """Parse a time-span into number of seconds.""" if timespan in UNAVAILABLE_VALUES: return None assert timespan is not None return sum(60 ** x[0] * int(x[1]) for x in enumerate(reversed(timespan.split(":")))) class SonosMedia: """Representation of the current Sonos media.""" def __init__(self, soco: SoCo) -> None: """Initialize a SonosMedia.""" self.library = MusicLibrary(soco) self.play_mode: str | None = None self.playback_status: str | None = None self.album_name: str | None = None self.artist: str | None = None self.channel: str | None = None self.duration: float | None = None self.image_url: str | None = None self.queue_position: int | None = None self.source_name: str | None = None self.title: str | None = None self.uri: str | None = None self.position: float | None = None self.position_updated_at: datetime.datetime | None = None def clear(self) -> None: """Clear basic media info.""" self.album_name = None self.artist = None self.channel = None self.duration = None self.image_url = None self.queue_position = None self.source_name = None self.title = None self.uri = None def clear_position(self) -> None: """Clear the position attributes.""" self.position = None self.position_updated_at = None class SonosSpeaker: """Representation of a Sonos speaker.""" def __init__( self, hass: HomeAssistant, soco: SoCo, speaker_info: dict[str, Any] ) -> None: """Initialize a SonosSpeaker.""" self.hass = hass self.soco = soco self.household_id: str = soco.household_id self.media = SonosMedia(soco) self._is_ready: bool = False self._subscriptions: list[SubscriptionBase] = [] self._resubscription_lock: asyncio.Lock | None = None self._poll_timer: Callable | None = None self._seen_timer: Callable | None = None self._platforms_ready: 
set[str] = set() self._entity_creation_dispatcher: Callable | None = None self._group_dispatcher: Callable | None = None self._seen_dispatcher: Callable | None = None self.mac_address = speaker_info["mac_address"] self.model_name = speaker_info["model_name"] self.version = speaker_info["display_version"] self.zone_name = speaker_info["zone_name"] self.battery_info: dict[str, Any] | None = None self._last_battery_event: datetime.datetime | None = None self._battery_poll_timer: Callable | None = None self.volume: int | None = None self.muted: bool | None = None self.night_mode: bool | None = None self.dialog_mode: bool | None = None self.coordinator: SonosSpeaker | None = None self.sonos_group: list[SonosSpeaker] = [self] self.sonos_group_entities: list[str] = [] self.soco_snapshot: Snapshot | None = None self.snapshot_group: list[SonosSpeaker] | None = None def setup(self) -> None: """Run initial setup of the speaker.""" self.set_basic_info() self._entity_creation_dispatcher = dispatcher_connect( self.hass, f"{SONOS_ENTITY_CREATED}-{self.soco.uid}", self.async_handle_new_entity, ) self._group_dispatcher = dispatcher_connect( self.hass, SONOS_GROUP_UPDATE, self.async_update_groups, ) self._seen_dispatcher = dispatcher_connect( self.hass, f"{SONOS_SEEN}-{self.soco.uid}", self.async_seen ) if (battery_info := fetch_battery_info_or_none(self.soco)) is None: self._platforms_ready.update({BINARY_SENSOR_DOMAIN, SENSOR_DOMAIN}) else: self.battery_info = battery_info # Only create a polling task if successful, may fail on S1 firmware if battery_info: # Battery events can be infrequent, polling is still necessary self._battery_poll_timer = self.hass.helpers.event.track_time_interval( self.async_poll_battery, BATTERY_SCAN_INTERVAL ) else: _LOGGER.warning( "S1 firmware detected, battery sensor may update infrequently" ) dispatcher_send(self.hass, SONOS_CREATE_BATTERY, self) if new_alarms := self.update_alarms_for_speaker(): dispatcher_send(self.hass, SONOS_CREATE_ALARM, self, 
new_alarms) else: self._platforms_ready.add(SWITCH_DOMAIN) dispatcher_send(self.hass, SONOS_CREATE_MEDIA_PLAYER, self) async def async_handle_new_entity(self, entity_type: str) -> None: """Listen to new entities to trigger first subscription.""" self._platforms_ready.add(entity_type) if self._platforms_ready == PLATFORMS and not self._subscriptions: self._resubscription_lock = asyncio.Lock() await self.async_subscribe() self._is_ready = True def write_entity_states(self) -> None: """Write states for associated SonosEntity instances.""" dispatcher_send(self.hass, f"{SONOS_STATE_UPDATED}-{self.soco.uid}") @callback def async_write_entity_states(self) -> None: """Write states for associated SonosEntity instances.""" async_dispatcher_send(self.hass, f"{SONOS_STATE_UPDATED}-{self.soco.uid}") def set_basic_info(self) -> None: """Set basic information when speaker is reconnected.""" self.media.play_mode = self.soco.play_mode self.update_volume() @property def available(self) -> bool: """Return whether this speaker is available.""" return self._seen_timer is not None async def async_subscribe(self) -> bool: """Initiate event subscriptions.""" _LOGGER.debug("Creating subscriptions for %s", self.zone_name) try: await self.hass.async_add_executor_job(self.set_basic_info) if self._subscriptions: raise RuntimeError( f"Attempted to attach subscriptions to player: {self.soco} " f"when existing subscriptions exist: {self._subscriptions}" ) await asyncio.gather( self._subscribe(self.soco.avTransport, self.async_update_media), self._subscribe(self.soco.renderingControl, self.async_update_volume), self._subscribe(self.soco.contentDirectory, self.async_update_content), self._subscribe( self.soco.zoneGroupTopology, self.async_dispatch_groups ), self._subscribe( self.soco.deviceProperties, self.async_dispatch_properties ), self._subscribe(self.soco.alarmClock, self.async_dispatch_alarms), ) return True except SoCoException as ex: _LOGGER.warning("Could not connect %s: %s", 
self.zone_name, ex) return False async def _subscribe( self, target: SubscriptionBase, sub_callback: Callable ) -> None: """Create a Sonos subscription.""" subscription = await target.subscribe( auto_renew=True, requested_timeout=SUBSCRIPTION_TIMEOUT ) subscription.callback = sub_callback subscription.auto_renew_fail = self.async_renew_failed self._subscriptions.append(subscription) @callback def async_dispatch_properties(self, event: SonosEvent | None = None) -> None: """Update properties from event.""" self.hass.async_create_task(self.async_update_device_properties(event)) @callback def async_dispatch_alarms(self, event: SonosEvent | None = None) -> None: """Update alarms from event.""" self.hass.async_create_task(self.async_update_alarms(event)) @callback def async_dispatch_groups(self, event: SonosEvent | None = None) -> None: """Update groups from event.""" if event and self._poll_timer: _LOGGER.debug( "Received event, cancelling poll timer for %s", self.zone_name ) self._poll_timer() self._poll_timer = None self.async_update_groups(event) async def async_seen(self, soco: SoCo | None = None) -> None: """Record that this speaker was seen right now.""" if soco is not None: self.soco = soco was_available = self.available _LOGGER.debug("Async seen: %s, was_available: %s", self.soco, was_available) if self._seen_timer: self._seen_timer() self._seen_timer = self.hass.helpers.event.async_call_later( SEEN_EXPIRE_TIME.total_seconds(), self.async_unseen ) if was_available: self.async_write_entity_states() return self._poll_timer = self.hass.helpers.event.async_track_time_interval( partial( async_dispatcher_send, self.hass, f"{SONOS_ENTITY_UPDATE}-{self.soco.uid}", ), SCAN_INTERVAL, ) if self._is_ready: done = await self.async_subscribe() if not done: assert self._seen_timer is not None self._seen_timer() await self.async_unseen() self.async_write_entity_states() async def async_resubscribe(self, exception: Exception) -> None: """Attempt to resubscribe when a renewal 
failure is detected.""" async with self._resubscription_lock: if self.available: if getattr(exception, "status", None) == 412: _LOGGER.warning( "Subscriptions for %s failed, speaker may have lost power", self.zone_name, ) else: _LOGGER.error( "Subscription renewals for %s failed", self.zone_name, exc_info=exception, ) await self.async_unseen() @callback def async_renew_failed(self, exception: Exception) -> None: """Handle a failed subscription renewal.""" self.hass.async_create_task(self.async_resubscribe(exception)) async def async_unseen(self, now: datetime.datetime | None = None) -> None: """Make this player unavailable when it was not seen recently.""" self.async_write_entity_states() if self._seen_timer: self._seen_timer() self._seen_timer = None if self._poll_timer: self._poll_timer() self._poll_timer = None for subscription in self._subscriptions: await subscription.unsubscribe() self._subscriptions = [] async def async_update_device_properties(self, event: SonosEvent = None) -> None: """Update device properties using the provided SonosEvent.""" if event is None: return if (more_info := event.variables.get("more_info")) is not None: battery_dict = dict(x.split(":") for x in more_info.split(",")) await self.async_update_battery_info(battery_dict) self.async_write_entity_states() def update_alarms_for_speaker(self) -> set[str]: """Update current alarm instances. Updates hass.data[DATA_SONOS].alarms and returns a list of all alarms that are new. 
""" new_alarms = set() stored_alarms = self.hass.data[DATA_SONOS].alarms updated_alarms = get_alarms(self.soco) for alarm in updated_alarms: if alarm.zone.uid == self.soco.uid and alarm.alarm_id not in list( stored_alarms.keys() ): new_alarms.add(alarm.alarm_id) stored_alarms[alarm.alarm_id] = alarm for alarm_id, alarm in list(stored_alarms.items()): if alarm not in updated_alarms: stored_alarms.pop(alarm_id) return new_alarms async def async_update_alarms(self, event: SonosEvent | None = None) -> None: """Update device properties using the provided SonosEvent.""" if event is None: return if new_alarms := await self.hass.async_add_executor_job( self.update_alarms_for_speaker ): async_dispatcher_send(self.hass, SONOS_CREATE_ALARM, self, new_alarms) async_dispatcher_send(self.hass, SONOS_ALARM_UPDATE, self) self.async_write_entity_states() async def async_update_battery_info(self, battery_dict: dict[str, Any]) -> None: """Update battery info using the decoded SonosEvent.""" self._last_battery_event = dt_util.utcnow() is_charging = EVENT_CHARGING[battery_dict["BattChg"]] if not self._battery_poll_timer: # Battery info received for an S1 speaker self.battery_info.update( { "Level": int(battery_dict["BattPct"]), "PowerSource": "EXTERNAL" if is_charging else "BATTERY", } ) return if is_charging == self.charging: self.battery_info.update({"Level": int(battery_dict["BattPct"])}) else: if battery_info := await self.hass.async_add_executor_job( fetch_battery_info_or_none, self.soco ): self.battery_info = battery_info @property def is_coordinator(self) -> bool: """Return true if player is a coordinator.""" return self.coordinator is None @property def power_source(self) -> str | None: """Return the name of
# NOTE: `pyeccodes.accessors` is not referenced by this generated lookup
# module; the import is kept for parity with the other generated definition
# modules, but guarded so the table can be used without the library installed.
try:
    import pyeccodes.accessors as _  # noqa: F401
except ImportError:
    _ = None

# GRIB1 parameter names keyed by (table2Version, indicatorOfParameter).
# Replaces the original linear if-chain (O(n) string comparisons per call)
# with a single O(1) dict lookup. The key set of the original chain is
# duplicate-free, so every mapped pair resolves to the same name as before.
_PARAMETER_NAMES = {
    (200, 71): 'Total Cloud Cover',
    (200, 65): 'Snow depth water equivalent',
    (200, 85): 'Soil Temperature',
    (200, 221): 'specific cloud water content',
    (200, 152): 'Vertical integral of northward water vapour flux',
    (200, 157): 'Vertical integral of eastward water vapour flux',
    (200, 191): 'Vertical integral of northward heat flux',
    (200, 190): 'Vertical integral of eastward heat flux',
    (200, 87): 'Percentage of vegetation',
    (200, 228): 'Cloud liquid water',
    (200, 127): 'Image data',
    (200, 126): 'Wind mixing energy',
    (200, 125): 'Momentum flux, v-component',
    (200, 124): 'Momentum flux, u-component',
    (200, 120): 'Radiance (with respect to wave length)',
    (200, 119): 'Radiance (with respect to wave number)',
    (200, 117): 'Global radiation flux',
    (200, 116): 'Short wave radiation flux',
    (200, 115): 'Long wave radiation flux',
    (200, 114): 'Net long-wave radiation flux(atmosph.top)',
    (200, 113): 'Net short-wave radiation flux(atmosph.top)',
    (200, 112): 'Net long-wave radiation flux (surface)',
    (200, 111): 'Net short-wave radiation flux (surface)',
    (200, 110): 'Secondary wave mean period',
    (200, 109): 'Secondary wave direction',
    (200, 108): 'Primary wave mean period',
    (200, 107): 'Primary wave direction',
    (200, 106): 'Mean period of swell waves',
    (200, 105): 'Significant height of swell waves',
    (200, 104): 'Direction of swell waves',
    (200, 103): 'Mean period of wind waves',
    (200, 102): 'Significant height of wind waves',
    (200, 101): 'Mean direction of wind waves',
    (200, 100): 'Signific.height,combined wind waves+swell',
    (200, 99): 'Snow melt',
    (200, 98): 'Ice divergence',
    (200, 97): 'Ice growth rate',
    (200, 96): 'V-component of ice drift',
    (200, 95): 'U-component of ice drift',
    (200, 94): 'Speed of ice drift',
    (200, 93): 'Direction of ice drift',
    (200, 92): 'Ice thickness',
    (200, 89): 'Density',
    (200, 88): 'Salinity',
    (200, 86): 'Soil moisture content',
    (200, 82): 'Deviation of sea-level from mean',
    (200, 80): 'Water temperature',
    (200, 77): 'Best lifted index (to 500 hPa)',
    (200, 70): 'Main thermocline anomaly',
    (200, 69): 'Main thermocline depth',
    (200, 68): 'Transient thermocline depth',
    (200, 67): 'Mixed layer depth',
    (200, 60): 'Thunderstorm probability',
    (200, 59): 'Precipitation rate',
    (200, 56): 'Saturation deficit',
    (200, 55): 'Vapour pressure',
    (200, 53): 'Humidity mixing ratio',
    # NOTE: the two "current" names below carry a trailing space in the
    # original table; preserved byte-for-byte.
    (200, 50): 'V-component of current ',
    (200, 49): 'U-component of current ',
    (200, 48): 'Speed of current',
    (200, 47): 'Direction of current',
    (200, 46): 'Vertical v-component shear',
    (200, 45): 'Vertical u-component shear',
    (200, 42): 'Absolute divergence',
    (200, 41): 'Absolute vorticity',
    (200, 38): 'Sigma coordinate vertical velocity',
    (200, 31): 'Wind direction',
    (200, 30): 'Wave spectra (3)',
    (200, 29): 'Wave spectra (2)',
    (200, 28): 'Wave spectra (1)',
    (200, 27): 'Geopotential height anomaly',
    (200, 26): 'Pressure anomaly',
    (200, 25): 'Temperature anomaly',
    (200, 24): 'Parcel lifted index (to 500 hPa)',
    (200, 23): 'Radar spectra (3)',
    (200, 22): 'Radar spectra (2)',
    (200, 21): 'Radar spectra (1)',
    (200, 20): 'Visibility',
    (200, 19): 'Lapse rate',
    (200, 18): 'Dew point depression (or deficit)',
    (200, 17): 'Dew point temperature',
    (200, 16): 'Minimum temperature',
    (200, 15): 'Maximum temperature',
    (200, 14): 'Pseudo-adiabatic potential temperature',
    (200, 9): 'Standard deviation of height',
    (200, 8): 'Geometrical height',
    (200, 5): 'ICAO Standard Atmosphere reference height',
    (200, 3): 'Pressure tendency',
    (200, 145): 'Ground/surface cover temperature',
    (200, 144): 'Temperature at canopy',
    (200, 225): 'Soil wetness of surface',
    (200, 203): 'Interception loss',
    (200, 40): 'Vertical velocity',
    (200, 12): 'Virtual temperature',
    (200, 252): 'Type of vegetation',
    (200, 253): 'Large scale moistening rate',
    (200, 251): 'Long wave radiative heating rate',
    (200, 250): 'Solar radiative heating rate',
    (200, 249): 'Vertical diffusion moistening rate',
    (200, 248): 'Vertical diffusion meridional acceleration',
    (200, 247): 'Vertical diffusion zonal acceleration',
    (200, 246): 'Vertical diffusion heating rate',
}


def load(h):
    """Resolve the human-readable GRIB1 parameter name for a message handle.

    :param h: message handle exposing ``get_l(key)`` for integer-valued keys
              (here: ``table2Version`` and ``indicatorOfParameter``)
    :return: the parameter name string, or ``None`` when the
             (table2Version, indicatorOfParameter) pair is not in the table
    """
    def wrapped(h):
        table2Version = h.get_l('table2Version')
        indicatorOfParameter = h.get_l('indicatorOfParameter')
        return _PARAMETER_NAMES.get((table2Version, indicatorOfParameter))

    # NOTE(review): the visible chunk was truncated before the function's
    # final statement; `return wrapped(h)` is the standard tail of these
    # generated pyeccodes modules — confirm against the complete file.
    return wrapped(h)
# -*- coding: utf-8 -*- """ S3 Extensions for gluon.dal.Field, reusable fields @requires: U{B{I{gluon}} <http://web2py.com>} @copyright: 2009-2012 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" __all__ = ["S3ReusableField", "s3_uid", "s3_meta_deletion_status", "s3_meta_deletion_fk", "s3_meta_deletion_rb", "s3_deletion_status", "s3_timestamp", "s3_authorstamp", "s3_ownerstamp", "s3_meta_fields", "s3_all_meta_field_names", # Used by GIS "s3_role_required", # Used by GIS "s3_roles_permitted", # Used by CMS (in future) "s3_lx_fields", "s3_lx_onvalidation", "s3_lx_update", "s3_address_fields", "s3_address_hide", "s3_address_onvalidation", "s3_address_update", "s3_comments", "s3_currency", "s3_date", "s3_datetime", ] import datetime from uuid import uuid4 from gluon import * # Here are dependencies listed for reference: #from gluon import current #from gluon.dal import Field #from gluon.html import * #from gluon.validators import * from gluon.dal import Query, SQLCustomType from gluon.storage import Storage from s3utils import S3DateTime, s3_auth_user_represent, s3_auth_group_represent from s3validators import IS_ONE_OF, IS_UTC_DATETIME from s3widgets import S3AutocompleteWidget, S3DateWidget, S3DateTimeWidget try: db = current.db except: # Running from 000_1st_run db = None # ============================================================================= class FieldS3(Field): """ S3 extensions of the gluon.sql.Field clas If Server Side Pagination is on, the proper CAST is needed to match the lookup table id """ def __init__(self, fieldname, type="string", length=None, default=None, required=False, requires="<default>", ondelete="CASCADE", notnull=False, unique=False, uploadfield=True, widget=None, label=None, comment=None, writable=True, readable=True, update=None, authorize=None, autodelete=False, represent=None, uploadfolder=None, compute=None, sortby=None): self.sortby = sortby Field.__init__(self, fieldname, type, length, default, required, requires, ondelete, notnull, unique, uploadfield, widget, label, comment, writable, readable, update, authorize, autodelete, represent, uploadfolder, compute) def join_via(self, value): if self.type.find("reference") 
# NOTE(review): newlines/indentation were lost in this chunk; the layout
# below is reconstructed (tokens unchanged) — verify against upstream
# s3fields.py before relying on block nesting.
# --- tail of FieldS3.join_via(): the "if self.type.find(...)" header is in
# --- the previous chunk; reference fields get a plain equality Query,
# --- everything else is routed through the CAST-based QueryS3 below.
== 0:
            return Query(self, "=", value)
        else:
            return QueryS3(self, "join_via", value)

# =============================================================================
class QueryS3(Query):
    """
        S3 extension of the gluon.sql.Query class.

        If Server-Side Pagination is on, the proper CAST is needed to
        match the string-typed id to the lookup table id.
    """

    def __init__(self, left, op=None, right=None):
        # NOTE(review): "<>" is Python-2-only syntax; this module cannot be
        # parsed by Python 3 as written.
        if op <> "join_via":
            # Any operator other than "join_via" behaves like a plain Query
            Query.__init__(self, left, op, right)
        else:
            # NOTE(review): the quoting of this SQL template is broken — the
            # inner double quote before the pipe terminates the string
            # literal, so the expression actually evaluates
            # str % args combined with `|` on another str, which raises
            # TypeError at runtime. The pipe character was presumably meant
            # to be escaped inside the SQL string; confirm against upstream.
            self.sql = "CAST(TRIM(%s,"|") AS INTEGER)=%s" % (left, right)

# =============================================================================
class S3ReusableField(object):
    """
        DRY Helper for reusable fields:

        This creates neither a Table nor a Field, but just an argument
        store. The field is created with the __call__ method, which is
        faster than copying an existing field.
    """

    def __init__(self, name, type="string", **attr):
        # Only record the defaults — no gluon Field is built here
        self.name = name
        self.__type = type
        self.attr = Storage(attr)

    def __call__(self, name=None, **attr):
        # Build the actual Field, overriding stored defaults with **attr
        if not name:
            name = self.name
        ia = Storage(self.attr)  # fresh copy so stored defaults stay intact
        if attr:
            # empty=False: unwrap an IS_EMPTY_OR validator so the field
            # becomes mandatory for this particular use
            if not attr.get("empty", True):
                requires = ia.requires
                if requires:
                    if not isinstance(requires, (list, tuple)):
                        requires = [requires]
                    if requires:
                        r = requires[0]
                        if isinstance(r, IS_EMPTY_OR):
                            requires = r.other
                            ia.update(requires=requires)
            # "empty" is a meta-option only — never passed through to Field
            if "empty" in attr:
                del attr["empty"]
            ia.update(**attr)
        if "script" in ia:
            # Inject an extra script into the field comment, then drop the
            # non-Field "script" key
            if ia.script:
                if ia.comment:
                    ia.comment = TAG[""](ia.comment, ia.script)
                else:
                    ia.comment = ia.script
            del ia["script"]
        if ia.sortby is not None:
            # sortby is only understood by the FieldS3 extension
            return FieldS3(name, self.__type, **ia)
        else:
            return Field(name, self.__type, **ia)

# =============================================================================
# Record identity meta-fields
# Use URNs according to http://tools.ietf.org/html/rfc4122
s3uuid = SQLCustomType(type = "string",
                       native = "VARCHAR(128)",
                       # empty value => generate a fresh URN; otherwise
                       # pass the UTF-8-encoded value through
                       encoder = lambda x: "%s" % (uuid4().urn if x == "" else str(x.encode("utf-8"))),
                       decoder = lambda x: x)

# Probe the DAL's string representation to detect old web2py versions
# (comment and alternative s3uuid definition continue in the next chunk)
if db and current.db._adapter.represent("X", s3uuid) != "'X'": # Old
web2py DAL, must add quotes in encoder s3uuid = SQLCustomType(type = "string", native = "VARCHAR(128)", encoder = (lambda x: "'%s'" % (uuid4().urn if x == "" else str(x.encode("utf-8")).replace("'", "''"))), decoder = (lambda x: x)) # Universally unique identifier for a record s3_meta_uuid = S3ReusableField("uuid", type=s3uuid, length=128, notnull=True, unique=True, readable=False, writable=False, default="") # Master-Copy-Index (for Sync) s3_meta_mci = S3ReusableField("mci", "integer", default=0, readable=False, writable=False) def s3_uid(): return (s3_meta_uuid(), s3_meta_mci()) # ============================================================================= # Record "soft"-deletion meta-fields # "Deleted"-flag s3_meta_deletion_status = S3ReusableField("deleted", "boolean", readable=False, writable=False, default=False) # Parked foreign keys of a deleted record in JSON format # => to be restored upon "un"-delete s3_meta_deletion_fk = S3ReusableField("deleted_fk", #"text", readable=False, writable=False) # ID of the record replacing this record # => for record merger (de-duplication) s3_meta_deletion_rb = S3ReusableField("deleted_rb", "integer", readable=False, writable=False) def s3_deletion_status(): return (s3_meta_deletion_status(), s3_meta_deletion_fk(), s3_meta_deletion_rb()) # ============================================================================= # Record timestamp meta-fields s3_meta_created_on = S3ReusableField("created_on", "datetime", readable=False, writable=False, default=lambda: datetime.datetime.utcnow()) s3_meta_modified_on = S3ReusableField("modified_on", "datetime", readable=False, writable=False, default=lambda: datetime.datetime.utcnow(), update=lambda: datetime.datetime.utcnow()) def s3_timestamp(): return (s3_meta_created_on(), s3_meta_modified_on()) # ========================================================================= # Record authorship meta-fields def s3_authorstamp(): """ Record ownership meta-fields """ auth = current.auth 
utable = auth.settings.table_user if auth.is_logged_in(): current_user = current.session.auth.user.id else: current_user = None # Author of a record s3_meta_created_by = S3ReusableField("created_by", utable, readable=False, writable=False, requires=None, default=current_user, represent=s3_auth_user_represent, ondelete="RESTRICT") # Last author of a record s3_meta_modified_by = S3ReusableField("modified_by", utable, readable=False, writable=False, requires=None, default=current_user, update=current_user, represent=s3_auth_user_represent, ondelete="RESTRICT") return (s3_meta_created_by(), s3_meta_modified_by()) # ========================================================================= def s3_ownerstamp(): """ Record ownership meta-fields """ auth = current.auth utable = auth.settings.table_user # Individual user who owns the record s3_meta_owned_by_user = S3ReusableField("owned_by_user", utable, readable=False, writable=False, requires=None, default=current.session.auth.user.id if auth.is_logged_in() else None, represent=lambda id: \ id and s3_auth_user_represent(id) or \ current.messages.UNKNOWN_OPT, ondelete="RESTRICT") # Role of users who collectively own the record s3_meta_owned_by_group = S3ReusableField("owned_by_group", "integer", readable=False, writable=False, requires=None, default=None, represent=s3_auth_group_represent) # Person Entity owning the record s3_meta_owned_by_entity = S3ReusableField("owned_by_entity", "integer", readable=False, writable=False, requires=None, default=None, # use a lambda here as we don't # want the model to be loaded yet represent=lambda val: \ current.s3db.pr_pentity_represent(val)) # Person Entity controlling access to this record s3_meta_realm_entity = S3ReusableField("realm_entity", "integer", readable=False, writable=False, requires=None, default=None, # use a lambda here as we don't # want the model to be loaded yet represent=lambda val: \ current.s3db.pr_pentity_represent(val)) return (s3_meta_owned_by_user(), 
s3_meta_owned_by_group(), s3_meta_owned_by_entity(), # retained for migration s3_meta_realm_entity()) # ========================================================================= def s3_meta_fields(): """ Normal meta-fields added to every table """ utable = current.auth.settings.table_user # Approver of a record s3_meta_approved_by = S3ReusableField("approved_by", "integer", readable=False, writable=False, requires=None, represent=s3_auth_user_represent) fields = (s3_meta_uuid(), s3_meta_mci(), s3_meta_deletion_status(), s3_meta_deletion_fk(), s3_meta_deletion_rb(), s3_meta_created_on(), s3_meta_modified_on(), s3_meta_approved_by(), ) fields = (fields + s3_authorstamp() + s3_ownerstamp()) return fields def s3_all_meta_field_names(): return [field.name for field in s3_meta_fields()] # ========================================================================= # Reusable roles fields def s3_role_required(): """ Role Required to access a resource - used by GIS for map layer permissions management """ T = current.T gtable = current.auth.settings.table_group f = S3ReusableField("role_required", gtable, sortby="role", requires = IS_NULL_OR( IS_ONE_OF(db, "auth_group.id", "%(role)s", zero=T("Public"))), widget = S3AutocompleteWidget("admin", "group", fieldname="role"), represent = s3_auth_group_represent, label = T("Role Required"), comment = DIV(_class="tooltip", _title="%s|%s" % (T("Role Required"), T("If this record should be restricted then select which role is required to access the record here."))), ondelete = "RESTRICT") return f() # ------------------------------------------------------------------------- def s3_roles_permitted(name="roles_permitted", **attr): """ List of Roles Permitted to access a resource - used by CMS """ from s3validators import IS_ONE_OF T = current.T if "label" not in attr: label = T("Roles Permitted") if "sortby" not in attr: sortby = "role" if "represent" not in attr: represent = s3_auth_group_represent if "requires" not in attr: requires = 
IS_NULL_OR(IS_ONE_OF(current.db, "auth_group.id", "%(role)s", multiple=True)) if "comment" not in attr: comment = DIV(_class="tooltip", _title="%s|%s" % (T("Roles Permitted"), T("If this record should be restricted then select which role(s) are permitted to access the record here."))) if "ondelete" not in attr: ondelete = "RESTRICT" f = S3ReusableField(name, "list:reference auth_group", sortby = sortby, requires = requires, represent = represent, # @ToDo #widget = S3CheckboxesWidget(lookup_table_name = "auth_group", # lookup_field_name = "role", # multiple = True), label = label, comment = comment, ondelete = ondelete) return f() # ============================================================================= # Lx # # These fields are populated onaccept from location_id # - for many reads to fewer writes, this is faster than Virtual Fields # - @ToDO: No need for virtual fields - replace with simple joins # # Labels that vary by country are set by gis.update_table_hierarchy_labels() # address_L4 = S3ReusableField("L4", readable=False, writable=False) address_L3 = S3ReusableField("L3", readable=False, writable=False) address_L2 = S3ReusableField("L2", readable=False, writable=False) address_L1 = S3ReusableField("L1", readable=False, writable=False) address_L0 = S3ReusableField("L0", readable=False, writable=False) def s3_lx_fields(): """ Return the fields used
<filename>noisemaker/generators.py """Noise generation interface for Noisemaker""" from functools import partial import tensorflow as tf from noisemaker.constants import ( ColorSpace, InterpolationType, OctaveBlending, ValueDistribution ) import noisemaker.effects as effects import noisemaker.oklab as oklab import noisemaker.simplex as simplex import noisemaker.value as value def basic(freq, shape, ridges=False, sin=0.0, spline_order=InterpolationType.bicubic, distrib=ValueDistribution.uniform, corners=False, mask=None, mask_inverse=False, mask_static=False, lattice_drift=0.0, color_space=ColorSpace.hsv, hue_range=.125, hue_rotation=None, saturation=1.0, hue_distrib=None, brightness_distrib=None, brightness_freq=None, saturation_distrib=None, speed=1.0, time=0.0, octave_effects=None, octave=1, **post_process_args): """ Generate a single layer of scaled noise. .. image:: images/gaussian.jpg :width: 1024 :height: 256 :alt: Noisemaker example output (CC0) :param int|list[int] freq: Base noise frequency. Int, or list of ints for each spatial dimension :param list[int]: Shape of noise. For 2D noise, this is [height, width, channels] :param bool ridges: "Crease" at midpoint values: (1 - abs(n * 2 - 1)) :param float sin: Apply sin function to noise basis :param int spline_order: Spline point count. 0=Constant, 1=Linear, 2=Cosine, 3=Bicubic :param int|str|ValueDistribution distrib: Type of noise distribution. 
See :class:`ValueDistribution` enum :param bool corners: If True, pin values to corners instead of image center :param None|ValueMask mask: :param bool mask_inverse: :param bool mask_static: If True, don't animate the mask :param float lattice_drift: Push away from underlying lattice :param ColorSpace color_space: :param float hue_range: HSV hue range :param float|None hue_rotation: HSV hue bias :param float saturation: HSV saturation :param None|int|str|ValueDistribution hue_distrib: Override ValueDistribution for hue :param None|int|str|ValueDistribution saturation_distrib: Override ValueDistribution for saturation :param None|int|str|ValueDistribution brightness_distrib: Override ValueDistribution for brightness :param None|int|list[int] brightness_freq: Override frequency for brightness :param float speed: Displacement range for Z/W axis (simplex and periodic only) :param float time: Time argument for Z/W axis (simplex and periodic only) :return: Tensor Additional keyword args will be sent to :py:func:`noisemaker.effects.post_process` """ if isinstance(freq, int): freq = value.freq_for_shape(freq, shape) color_space = value.coerce_enum(color_space, ColorSpace) common_value_params = { "corners": corners, "mask": mask, "mask_inverse": mask_inverse, "mask_static": mask_static, "speed": speed, "spline_order": spline_order, "time": time, } tensor = value.values(freq=freq, shape=shape, distrib=distrib, **common_value_params) # Use 1 channel for per-channel noise generation, if any common_value_params["shape"] = [shape[0], shape[1], 1] if lattice_drift: tensor = value.refract(tensor, shape, time=time, speed=speed, displacement=lattice_drift / min(freq[0], freq[1]), warp_freq=freq, spline_order=spline_order, signed_range=False) if octave_effects is not None: # New way for effect_or_preset in octave_effects: tensor = _apply_octave_effect_or_preset(effect_or_preset, tensor, shape, time, speed, octave) else: # Old way tensor = effects.post_process(tensor, shape, freq, 
time=time, speed=speed, spline_order=spline_order, color_space=color_space, **post_process_args) if color_space == ColorSpace.hsv: if hue_distrib: h = tf.squeeze(value.values(freq=freq, distrib=hue_distrib, **common_value_params)) else: if hue_rotation is None: hue_rotation = simplex.random(time=time, speed=speed) h = (tensor[:, :, 0] * hue_range + hue_rotation) % 1.0 if saturation_distrib: s = tf.squeeze(value.values(freq=freq, distrib=saturation_distrib, **common_value_params)) else: s = tensor[:, :, 1] s *= saturation if brightness_distrib or brightness_freq: if isinstance(brightness_freq, int): brightness_freq = value.freq_for_shape(brightness_freq, shape) v = tf.squeeze(value.values(freq=brightness_freq or freq, distrib=brightness_distrib or ValueDistribution.uniform, **common_value_params)) else: v = tensor[:, :, 2] if ridges and spline_order: # ridges don't work well when not interpolating values v = value.ridge(v) if sin: v = value.normalize(tf.sin(sin * v)) # Preserve the alpha channel before conversion to RGB if shape[2] == 4: a = tensor[:, :, 3] tensor = tf.image.hsv_to_rgb([tf.stack([h, s, v], 2)])[0] if shape[2] == 4: tensor = tf.stack([tensor[:, :, 0], tensor[:, :, 1], tensor[:, :, 2], a], 2) elif color_space == ColorSpace.oklab: L = tensor[:, :, 0] a = tensor[:, :, 1] * -.509 + .276 b = tensor[:, :, 2] * -.509 + .198 if shape[2] == 4: alpha = tensor[:, :, 3] if ridges and spline_order: # ridges don't work well when not interpolating values L = value.ridge(L) if sin: L = value.normalize(tf.sin(sin * L)) # print(f"L min {tf.reduce_min(L)} max {tf.reduce_max(L)}") # print(f"a min {tf.reduce_min(a)} max {tf.reduce_max(a)}") # print(f"b min {tf.reduce_min(b)} max {tf.reduce_max(b)}") tensor = value.clamp01(oklab.oklab_to_rgb(tf.stack([L, a, b], 2))) if shape[2] == 4: tensor = tf.stack([tensor[:, :, 0], tensor[:, :, 1], tensor[:, :, 2], alpha], 2) elif ridges and spline_order: tensor = value.ridge(tensor) if sin and color_space in (ColorSpace.rgb, 
ColorSpace.grayscale): tensor = tf.sin(sin * tensor) return tensor def multires(freq=3, shape=None, octaves=1, ridges=False, spline_order=InterpolationType.bicubic, distrib=ValueDistribution.uniform, corners=False, mask=None, mask_inverse=False, mask_static=False, lattice_drift=0.0, color_space=ColorSpace.hsv, hue_range=.125, hue_rotation=None, saturation=1.0, hue_distrib=None, saturation_distrib=None, brightness_distrib=None, brightness_freq=None, octave_blending=OctaveBlending.falloff, octave_effects=None, post_effects=None, time=0.0, speed=1.0, tensor=None): """ Generate multi-resolution value noise. For each octave: freq increases, amplitude decreases. .. image:: images/multires.jpg :width: 1024 :height: 256 :alt: Noisemaker example output (CC0) :param int|list[int] freq: Bottom layer frequency. Int, or list of ints for each spatial dimension :param list[int]: Shape of noise. For 2D noise, this is [height, width, channels] :param int octaves: Octave count. Number of multi-res layers. Typically 1-8 :param bool ridges: Per-octave "crease" at midpoint values: (1 - abs(n * 2 - 1)) :param bool post_ridges: Post-reduce "crease" at midpoint values: (1 - abs(n * 2 - 1)) :param int spline_order: Spline point count. 0=Constant, 1=Linear, 2=Cosine, 3=Bicubic :param int|ValueDistribution distrib: Type of noise distribution. 
See :class:`ValueDistribution` enum :param bool corners: If True, pin values to corners instead of image center :param None|ValueMask mask: :param bool mask_inverse: :param bool mask_static: If True, don't animate the mask :param float lattice_drift: Push away from underlying lattice :param ColorSpace color_space: :param float hue_range: HSV hue range :param float|None hue_rotation: HSV hue bias :param float saturation: HSV saturation :param None|ValueDistribution hue_distrib: Override ValueDistribution for HSV hue :param None|ValueDistribution saturation_distrib: Override ValueDistribution for HSV saturation :param None|ValueDistribution brightness_distrib: Override ValueDistribution for HSV brightness :param None|int|list[int] brightness_freq: Override frequency for HSV brightness :param OctaveBlendingMethod|int octave_blending: Method for flattening octave values :param list[callable] octave_effects: A list of composer lambdas to invoke per-octave :param list[callable] post_effects: A list of composer lambdas to invoke after flattening layers :param float speed: Displacement range for Z/W axis (simplex and periodic only) :param float time: Time argument for Z/W axis (simplex and periodic only) :return: Tensor Additional keyword args will be sent to :py:func:`noisemaker.effects.post_process` """ # Normalize input if isinstance(freq, int): freq = value.freq_for_shape(freq, shape) octave_blending = value.coerce_enum(octave_blending, OctaveBlending) original_shape = shape.copy() if octave_blending == OctaveBlending.alpha and shape[2] in (1, 3): # Make sure there's an alpha channel shape[2] += 1 if tensor is None: tensor = tf.zeros(shape) for octave in range(1, octaves + 1): multiplier = 2 ** octave base_freq = [int(f * .5 * multiplier) for f in freq] if all(base_freq[i] > shape[i] for i in range(len(base_freq))): break layer = basic(base_freq, shape, ridges=ridges, spline_order=spline_order, corners=corners, distrib=distrib, mask=mask, mask_inverse=mask_inverse, 
mask_static=mask_static, lattice_drift=lattice_drift, color_space=color_space, hue_range=hue_range, hue_rotation=hue_rotation, saturation=saturation, hue_distrib=hue_distrib, brightness_distrib=brightness_distrib, brightness_freq=brightness_freq, saturation_distrib=saturation_distrib, octave_effects=octave_effects, octave=octave, time=time, speed=speed) if octave_blending == OctaveBlending.reduce_max: tensor = tf.maximum(tensor, layer) elif octave_blending == OctaveBlending.alpha: a = tf.expand_dims(layer[:, :, -1], -1) tensor = (tensor * (1.0 - a)) + layer * a else: # falloff tensor += layer / multiplier # If the original shape did not include an alpha channel, reduce masked values to 0 (black) if octave_blending == OctaveBlending.alpha and original_shape[2] in (1, 3): a = tensor[:, :, -1] if original_shape[2] == 1: tensor = tf.expand_dims(tensor[:, :, 0] * a, -1) elif original_shape[2] == 3: tensor = tf.stack([tensor[:, :, 0], tensor[:, :, 1], tensor[:, :, 2]], 2) * tf.expand_dims(a, -1) shape = original_shape else: for effect_or_preset in octave_effects: tensor = _apply_octave_effect_or_preset(effect_or_preset, tensor, shape, time, speed, 1) tensor = value.normalize(tensor) for effect_or_preset in post_effects: tensor = _apply_post_effect_or_preset(effect_or_preset, tensor, shape, time, speed) return tensor def multires_old(freq=3, shape=None, octaves=4, ridges=False, sin=0.0, spline_order=InterpolationType.bicubic, distrib=ValueDistribution.uniform, corners=False, mask=None, mask_inverse=False, mask_static=False, time=0.0, speed=1.0, color_space=ColorSpace.hsv, hue_range=.125, hue_rotation=None, saturation=1.0, hue_distrib=None, saturation_distrib=None, brightness_distrib=None, brightness_freq=None, octave_blending=OctaveBlending.falloff, post_ridges=False, reflect_range=0.0, refract_range=0.0, reindex_range=0.0, deriv=False, deriv_metric=0, deriv_alpha=1.0, lattice_drift=0.0, post_reindex_range=0.0, post_reflect_range=0.0, post_refract_range=0.0, 
post_refract_y_from_offset=True, post_deriv=False, with_reverb=None, reverb_iterations=1, **post_process_args): """ This method is deprecated. Please use multires() instead. :param int|list[int] freq: Bottom layer frequency. Int, or list of ints for each spatial dimension :param list[int]: Shape of noise. For 2D noise, this is [height, width, channels] :param int octaves: Octave count. Number of multi-res layers. Typically 1-8 :param bool ridges: Per-octave "crease" at midpoint values: (1 - abs(n * 2 - 1)) :param bool post_ridges: Post-reduce "crease" at midpoint values: (1 - abs(n * 2 - 1)) :param float sin: Apply sin function to noise basis :param int spline_order: Spline point count. 0=Constant, 1=Linear, 2=Cosine, 3=Bicubic :param
"fill": "orange", "order": "raise"} ], "NIMR_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ] } } """, "h3-neut": """{ "_":"-*- js-indent-level: 2 -*-", "mods": { "CDC_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"full_name": "A(H3N2)/SOUTH AUSTRALIA/135/2016 SIAT2/SIAT1 (2018-07-31)"}, "label": {"offset": [-1, 0], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": "A/Maryland/53/2017"}, "label": {"offset": [ 0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "MELB_serology": [ {"?N":"antigens", "select": {"name": 
""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "NIID_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "NIMR_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ] } } """, "bvic-hi": """{ "_":"-*- js-indent-level: 2 -*-", "mods": 
{ "CDC_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"full_name": "B/IOWA/6/2017 QMC2"}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": "B/HONG KONG/286/2017", "passage": "egg"}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "MELB_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "NIID_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, 
"outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "NIMR_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ] } } """, "byam-hi": """{ "_":"-*- js-indent-level: 2 -*-", "mods": { "CDC_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"full_name": "B/GUYANE/5/2018 MDCK2 (2018-05-02)"}, "label": {"offset": [-1, 0], "size": 24, 
"name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": "B/PUERTO RICO/5/2018"}, "label": {"offset": [-1, 0], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "MELB_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "NIID_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ], "NIMR_serology": [ {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], 
"size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"}, {"?N":"antigens", "select": {"name": ""}, "label": {"offset": [0, 1], "size": 24, "name_type": "abbreviated_with_passage_type"}, "report": true, "size": 18, "outline": "black", "fill": "orange", "order": "raise"} ] } } """, } # ====================================================================== # H1 HI # ====================================================================== s_h1_hi_labs = ["ALL"] s_h1_hi_data = """ "set_scale": [ {"N": "point_scale", "scale": 2.5, "outline_scale": 1} ], "set_legend": [ {"N": "legend", "label_size": 14, "point_size": 10} ], "no_legend": [ {"N": "legend", "show": false} ], "information": [ {"N": "point_scale", "scale": 2.5, "outline_scale": 1}, {"N": "antigens", "select": "all", "size": 15}, {"N": "antigens", "select": {"older_than_days": 730}, "fill": "grey80", "outline": "grey80", "order": "lower"}, {"N": "antigens", "select": {"younger_than_days": 730, "older_than_days": 365}, "fill": "#6F93E6", "outline": "black", "raise_": true}, {"N": "antigens", "select": {"younger_than_days": 365}, "fill": "#F9DA4A", "outline": "black", "raise_": true} ], "information_clades": [ {"N": "point_scale", "scale": 2.5, "outline_scale": 1}, {"N": "antigens", "select": "all", "size": 15}, {"N": "clades_last_12_months", "size": 15}, "no_legend" ] """ # --------------- ALL H1 ------------------------------------------------------- s_h1_hi_ALL_data = """ "ALL_flip": [ {"?N": "flip", "direction": "ew"} ], "ALL_rotate": [ {"N": "rotate", "degrees": 0} ], "ALL_viewport": [ {"N": "viewport", "rel": [8, 5.5, -12]} ], "ALL_pre": [ ], "ALL_mid": [ ], "ALL_post": [ ]""" # --------------- CDC H1 ------------------------------------------------------- s_h1_hi_CDC_data = """ "CDC_flip": [ {"?N": "flip", "direction": "ew"} ], "CDC_rotate": [ {"N": "rotate", "degrees": 0} ], "CDC_viewport": [ {"N": "viewport", "rel": [0, 
0, 0]} ], "CDC_pre": [ ], "CDC_mid": [ ], "CDC_post": [ ]""" # ====================================================================== # H3 HI # ====================================================================== s_h3_hi_labs = ["CDC", "MELB", "NIMR"] s_h3_hi_data = """ "set_scale": [ {"N": "point_scale", "scale": 2.5, "outline_scale": 1} ], "set_legend": [ {"N": "legend", "label_size": 14, "point_size":
<filename>journal_venv/lib/python3.9/site-packages/cartopy/crs.py # (C) British Crown Copyright 2011 - 2019, Met Office # # This file is part of cartopy. # # cartopy is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # cartopy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with cartopy. If not, see <https://www.gnu.org/licenses/>. """ The crs module defines Coordinate Reference Systems and the transformations between them. """ from __future__ import (absolute_import, division, print_function) from abc import ABCMeta, abstractproperty import math import warnings import numpy as np import shapely.geometry as sgeom from shapely.prepared import prep import six from cartopy._crs import (CRS, Geodetic, Globe, PROJ4_VERSION, WGS84_SEMIMAJOR_AXIS, WGS84_SEMIMINOR_AXIS) from cartopy._crs import Geocentric # noqa: F401 (flake8 = unused import) import cartopy.trace __document_these__ = ['CRS', 'Geocentric', 'Geodetic', 'Globe'] class RotatedGeodetic(CRS): """ Define a rotated latitude/longitude coordinate system with spherical topology and geographical distance. Coordinates are measured in degrees. The class uses proj to perform an ob_tran operation, using the pole_longitude to set a lon_0 then performing two rotations based on pole_latitude and central_rotated_longitude. This is equivalent to setting the new pole to a location defined by the pole_latitude and pole_longitude values in the GeogCRS defined by globe, then rotating this new CRS about it's pole using the central_rotated_longitude value. 
""" def __init__(self, pole_longitude, pole_latitude, central_rotated_longitude=0.0, globe=None): """ Parameters ---------- pole_longitude Pole longitude position, in unrotated degrees. pole_latitude Pole latitude position, in unrotated degrees. central_rotated_longitude: optional Longitude rotation about the new pole, in degrees. Defaults to 0. globe: optional A :class:`cartopy.crs.Globe`. Defaults to a "WGS84" datum. """ proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'), ('o_lon_p', central_rotated_longitude), ('o_lat_p', pole_latitude), ('lon_0', 180 + pole_longitude), ('to_meter', math.radians(1))] globe = globe or Globe(datum='WGS84') super(RotatedGeodetic, self).__init__(proj4_params, globe=globe) class Projection(six.with_metaclass(ABCMeta, CRS)): """ Define a projected coordinate system with flat topology and Euclidean distance. """ _method_map = { 'Point': '_project_point', 'LineString': '_project_line_string', 'LinearRing': '_project_linear_ring', 'Polygon': '_project_polygon', 'MultiPoint': '_project_multipoint', 'MultiLineString': '_project_multiline', 'MultiPolygon': '_project_multipolygon', } @abstractproperty def boundary(self): pass @abstractproperty def threshold(self): pass @abstractproperty def x_limits(self): pass @abstractproperty def y_limits(self): pass @property def cw_boundary(self): try: boundary = self._cw_boundary except AttributeError: boundary = sgeom.LinearRing(self.boundary) self._cw_boundary = boundary return boundary @property def ccw_boundary(self): try: boundary = self._ccw_boundary except AttributeError: boundary = sgeom.LinearRing(self.boundary.coords[::-1]) self._ccw_boundary = boundary return boundary @property def domain(self): try: domain = self._domain except AttributeError: domain = self._domain = sgeom.Polygon(self.boundary) return domain def _determine_longitude_bounds(self, central_longitude): # In new proj, using exact limits will wrap-around, so subtract a # small epsilon: epsilon = 1e-10 minlon = -180 + 
central_longitude maxlon = 180 + central_longitude if central_longitude > 0: maxlon -= epsilon elif central_longitude < 0: minlon += epsilon return minlon, maxlon def _repr_html_(self): if not six.PY2: from html import escape else: from cgi import escape try: # As matplotlib is not a core cartopy dependency, don't error # if it's not available. import matplotlib.pyplot as plt except ImportError: # We can't return an SVG of the CRS, so let Jupyter fall back to # a default repr by returning None. return None # Produce a visual repr of the Projection instance. fig, ax = plt.subplots(figsize=(5, 3), subplot_kw={'projection': self}) ax.set_global() ax.coastlines('auto') ax.gridlines() buf = six.StringIO() fig.savefig(buf, format='svg', bbox_inches='tight') plt.close(fig) # "Rewind" the buffer to the start and return it as an svg string. buf.seek(0) svg = buf.read() return '{}<pre>{}</pre>'.format(svg, escape(repr(self))) def _as_mpl_axes(self): import cartopy.mpl.geoaxes as geoaxes return geoaxes.GeoAxes, {'map_projection': self} def project_geometry(self, geometry, src_crs=None): """ Project the given geometry into this projection. Parameters ---------- geometry The geometry to (re-)project. src_crs: optional The source CRS. Defaults to None. If src_crs is None, the source CRS is assumed to be a geodetic version of the target CRS. Returns ------- geometry The projected result (a shapely geometry). 
""" if src_crs is None: src_crs = self.as_geodetic() elif not isinstance(src_crs, CRS): raise TypeError('Source CRS must be an instance of CRS' ' or one of its subclasses, or None.') geom_type = geometry.geom_type method_name = self._method_map.get(geom_type) if not method_name: raise ValueError('Unsupported geometry ' 'type {!r}'.format(geom_type)) return getattr(self, method_name)(geometry, src_crs) def _project_point(self, point, src_crs): return sgeom.Point(*self.transform_point(point.x, point.y, src_crs)) def _project_line_string(self, geometry, src_crs): return cartopy.trace.project_linear(geometry, src_crs, self) def _project_linear_ring(self, linear_ring, src_crs): """ Project the given LinearRing from the src_crs into this CRS and returns a list of LinearRings and a single MultiLineString. """ debug = False # 1) Resolve the initial lines into projected segments # 1abc # def23ghi # jkl41 multi_line_string = cartopy.trace.project_linear(linear_ring, src_crs, self) # Threshold for whether a point is close enough to be the same # point as another. threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5 # 2) Simplify the segments where appropriate. if len(multi_line_string) > 1: # Stitch together segments which are close to continuous. # This is important when: # 1) The first source point projects into the map and the # ring has been cut by the boundary. # Continuing the example from above this gives: # def23ghi # jkl41abc # 2) The cut ends of segments are too close to reliably # place into an order along the boundary. 
line_strings = list(multi_line_string) any_modified = False i = 0 if debug: first_coord = np.array([ls.coords[0] for ls in line_strings]) last_coord = np.array([ls.coords[-1] for ls in line_strings]) print('Distance matrix:') np.set_printoptions(precision=2) x = first_coord[:, np.newaxis, :] y = last_coord[np.newaxis, :, :] print(np.abs(x - y).max(axis=-1)) while i < len(line_strings): modified = False j = 0 while j < len(line_strings): if i != j and np.allclose(line_strings[i].coords[0], line_strings[j].coords[-1], atol=threshold): if debug: print('Joining together {} and {}.'.format(i, j)) last_coords = list(line_strings[j].coords) first_coords = list(line_strings[i].coords)[1:] combo = sgeom.LineString(last_coords + first_coords) if j < i: i, j = j, i del line_strings[j], line_strings[i] line_strings.append(combo) modified = True any_modified = True break else: j += 1 if not modified: i += 1 if any_modified: multi_line_string = sgeom.MultiLineString(line_strings) # 3) Check for rings that have been created by the projection stage. rings = [] line_strings = [] for line in multi_line_string: if len(line.coords) > 3 and np.allclose(line.coords[0], line.coords[-1], atol=threshold): result_geometry = sgeom.LinearRing(line.coords[:-1]) rings.append(result_geometry) else: line_strings.append(line) # If we found any rings, then we should re-create the multi-line str. 
if rings: multi_line_string = sgeom.MultiLineString(line_strings) return rings, multi_line_string def _project_multipoint(self, geometry, src_crs): geoms = [] for geom in geometry.geoms: geoms.append(self._project_point(geom, src_crs)) if geoms: return sgeom.MultiPoint(geoms) else: return sgeom.MultiPoint() def _project_multiline(self, geometry, src_crs): geoms = [] for geom in geometry.geoms: r = self._project_line_string(geom, src_crs) if r: geoms.extend(r.geoms) if geoms: return sgeom.MultiLineString(geoms) else: return [] def _project_multipolygon(self, geometry, src_crs): geoms = [] for geom in geometry.geoms: r = self._project_polygon(geom, src_crs) if r: geoms.extend(r.geoms) if geoms: result = sgeom.MultiPolygon(geoms) else: result = sgeom.MultiPolygon() return result def _project_polygon(self, polygon, src_crs): """ Return the projected polygon(s) derived from the given polygon. """ # Determine orientation of polygon. # TODO: Consider checking the internal rings have the opposite # orientation to the external rings? if src_crs.is_geodetic(): is_ccw = True else: is_ccw = polygon.exterior.is_ccw # Project the polygon exterior/interior rings. # Each source ring will result in either a ring, or one or more # lines. rings = [] multi_lines = [] for src_ring in [polygon.exterior] + list(polygon.interiors): p_rings, p_mline = self._project_linear_ring(src_ring, src_crs) if p_rings: rings.extend(p_rings) if len(p_mline) > 0: multi_lines.append(p_mline) # Convert any lines to rings by attaching them to the boundary. if multi_lines: rings.extend(self._attach_lines_to_boundary(multi_lines, is_ccw)) # Resolve all the inside vs. outside rings, and convert to the # final MultiPolygon. return self._rings_to_multi_polygon(rings, is_ccw) def _attach_lines_to_boundary(self, multi_line_strings, is_ccw): """ Return a list of LinearRings by attaching the ends of the given lines to the boundary, paying attention to the traversal directions of the lines and boundary. 
""" debug = False debug_plot_edges = False # Accumulate all the boundary and segment end points, along with #
<reponame>cheginit/pydeymet """Core class for the Daymet functions.""" import functools import warnings from datetime import datetime from typing import Dict, Iterable, List, Optional, Tuple, TypeVar, Union import numpy as np import pandas as pd import shapely.geometry as sgeom import xarray as xr from pydantic import BaseModel, validator from .exceptions import InvalidInputRange, InvalidInputType, InvalidInputValue try: from numba import njit, prange ngjit = functools.partial(njit, cache=True, nogil=True, parallel=True) HAS_NUMBA = True except ImportError: HAS_NUMBA = False prange = range def ngjit(ntypes): # type: ignore def decorator_njit(func): # type: ignore @functools.wraps(func) def wrapper_decorator(*args, **kwargs): # type: ignore return func(*args, **kwargs) return wrapper_decorator return decorator_njit DF = TypeVar("DF", pd.DataFrame, xr.Dataset) DEF_CRS = "epsg:4326" DATE_FMT = "%Y-%m-%d" # Default snow params from https://doi.org/10.5194/gmd-11-1077-2018 T_RAIN = 2.5 # degC T_SNOW = 0.6 # degC __all__ = ["Daymet"] class DaymetBase(BaseModel): """Base class for validating Daymet requests. Parameters ---------- pet : str, optional Method for computing PET. Supported methods are ``penman_monteith``, ``priestley_taylor``, ``hargreaves_samani``, and None (don't compute PET). The ``penman_monteith`` method is based on :footcite:t:`Allen_1998` assuming that soil heat flux density is zero. The ``priestley_taylor`` method is based on :footcite:t:`Priestley_1972` assuming that soil heat flux density is zero. The ``hargreaves_samani`` method is based on :footcite:t:`Hargreaves_1982`. Defaults to ``None``. snow : bool, optional Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. time_scale : str, optional Data time scale which can be daily, monthly (monthly summaries), or annual (annual summaries). Defaults to daily. variables : list, optional List of variables to be downloaded. 
The acceptable variables are: ``tmin``, ``tmax``, ``prcp``, ``srad``, ``vp``, ``swe``, ``dayl`` Descriptions can be found `here <https://daymet.ornl.gov/overview>`__. Defaults to None i.e., all the variables are downloaded. region : str, optional Region in the US, defaults to na. Acceptable values are: * na: Continental North America * hi: Hawaii * pr: Puerto Rico References ---------- .. footbibliography:: """ pet: Optional[str] = None snow: bool = False time_scale: str = "daily" variables: List[str] = ["all"] region: str = "na" @validator("pet") def _pet(cls, v: Optional[str]) -> Optional[str]: valid_methods = ["penman_monteith", "hargreaves_samani", "priestley_taylor", None] if v not in valid_methods: raise InvalidInputValue("pet", valid_methods) return v @validator("variables") def _variables(cls, v: List[str], values: Dict[str, str]) -> List[str]: valid_variables = ["dayl", "prcp", "srad", "swe", "tmax", "tmin", "vp"] if "all" in v: return valid_variables if not set(v).issubset(set(valid_variables)): raise InvalidInputValue("variables", valid_variables) if values["pet"] is not None: v = list(set(v).union({"tmin", "tmax", "srad", "dayl"})) if values["snow"]: v = list(set(v).union({"tmin"})) return v @validator("time_scale") def _timescales(cls, v: str, values: Dict[str, str]) -> str: valid_timescales = ["daily", "monthly", "annual"] if v not in valid_timescales: raise InvalidInputValue("time_scale", valid_timescales) if values["pet"] is not None and v != "daily": msg = "PET can only be computed at daily scale i.e., time_scale must be daily." 
raise InvalidInputRange(msg) return v @validator("region") def _regions(cls, v: str) -> str: valid_regions = ["na", "hi", "pr"] if v not in valid_regions: raise InvalidInputValue("region", valid_regions) return v @ngjit("f8[::1](f8[::1], f8[::1], f8, f8)") # type: ignore def _separate_snow( prcp: np.ndarray, tmin: np.ndarray, t_rain: float = T_RAIN, t_snow: float = T_SNOW # type: ignore ) -> np.ndarray: # type: ignore """Separate snow in precipitation.""" t_rng = t_rain - t_snow snow = np.zeros_like(prcp) for t in prange(prcp.shape[0]): if tmin[t] > t_rain: snow[t] = 0.0 elif tmin[t] < t_snow: snow[t] = prcp[t] else: snow[t] = prcp[t] * (t_rain - tmin[t]) / t_rng return snow class Daymet: """Base class for Daymet requests. Parameters ---------- variables : str or list or tuple, optional List of variables to be downloaded. The acceptable variables are: ``tmin``, ``tmax``, ``prcp``, ``srad``, ``vp``, ``swe``, ``dayl`` Descriptions can be found `here <https://daymet.ornl.gov/overview>`__. Defaults to None i.e., all the variables are downloaded. pet : str, optional Method for computing PET. Supported methods are ``penman_monteith``, ``priestley_taylor``, ``hargreaves_samani``, and None (don't compute PET). The ``penman_monteith`` method is based on :footcite:t:`Allen_1998` assuming that soil heat flux density is zero. The ``priestley_taylor`` method is based on :footcite:t:`Priestley_1972` assuming that soil heat flux density is zero. The ``hargreaves_samani`` method is based on :footcite:t:`Hargreaves_1982`. Defaults to ``None``. snow : bool, optional Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. time_scale : str, optional Data time scale which can be daily, monthly (monthly summaries), or annual (annual summaries). Defaults to daily. region : str, optional Region in the US, defaults to na. Acceptable values are: * na: Continental North America * hi: Hawaii * pr: Puerto Rico References ---------- .. 
footbibliography:: """ def __init__( self, variables: Optional[Union[Iterable[str], str]] = None, pet: Optional[str] = None, snow: bool = False, time_scale: str = "daily", region: str = "na", ) -> None: _variables = ["all"] if variables is None else variables _variables = [_variables] if isinstance(_variables, str) else _variables validated = DaymetBase( variables=_variables, pet=pet, snow=snow, time_scale=time_scale, region=region ) self.variables = validated.variables self.pet = validated.pet self.time_scale = validated.time_scale self.region = validated.region self.snow = validated.snow self.region_bbox = { "na": sgeom.box(-136.8989, 6.0761, -6.1376, 69.077), "hi": sgeom.box(-160.3055, 17.9539, -154.7715, 23.5186), "pr": sgeom.box(-67.9927, 16.8443, -64.1195, 19.9381), } self.invalid_bbox_msg = "\n".join( [ f"Input coordinates are outside the Daymet range for region ``{region}``.", f"Valid bounding box is: {self.region_bbox[region].bounds}", ] ) if self.region == "pr": self.valid_start = pd.to_datetime("1950-01-01") else: self.valid_start = pd.to_datetime("1980-01-01") self.valid_end = pd.to_datetime(f"{datetime.now().year - 1}-12-31") self._invalid_yr = ( "Daymet database ranges from " + f"{self.valid_start.year} to {self.valid_end.year}." ) self.time_codes = {"daily": 1840, "monthly": 1855, "annual": 1852} self.daymet_table = pd.DataFrame( { "Parameter": [ "Day length", "Precipitation", "Shortwave radiation", "Snow water equivalent", "Maximum air temperature", "Minimum air temperature", "Water vapor pressure", ], "Abbr": ["dayl", "prcp", "srad", "swe", "tmax", "tmin", "vp"], "Units": ["s/day", "mm/day", "W/m2", "kg/m2", "degrees C", "degrees C", "Pa"], "Description": [ "Duration of the daylight period in seconds per day. " + "This calculation is based on the period of the day during which the " + "sun is above a hypothetical flat horizon", "Daily total precipitation in millimeters per day, sum of" + " all forms converted to water-equivalent. 
Precipitation occurrence on " + "any given day may be ascertained.", "Incident shortwave radiation flux density in watts per square meter, " + "taken as an average over the daylight period of the day. " + "NOTE: Daily total radiation (MJ/m2/day) can be calculated as follows: " + "((srad (W/m2) * dayl (s/day)) / l,000,000)", "Snow water equivalent in kilograms per square meter." + " The amount of water contained within the snowpack.", "Daily maximum 2-meter air temperature in degrees Celsius.", "Daily minimum 2-meter air temperature in degrees Celsius.", "Water vapor pressure in pascals. Daily average partial pressure of water vapor.", ], } ) self.units = dict(zip(self.daymet_table["Abbr"], self.daymet_table["Units"])) @staticmethod def check_dates(dates: Union[Tuple[str, str], Union[int, List[int]]]) -> None: """Check if input dates are in correct format and valid.""" if not isinstance(dates, (tuple, list, int)): raise InvalidInputType( "dates", "tuple, list, or int", "(start, end), year, or [years, ...]" ) if isinstance(dates, tuple) and len(dates) != 2: raise InvalidInputType( "dates", "Start and end should be passed as a tuple of length 2." 
) def dates_todict(self, dates: Tuple[str, str]) -> Dict[str, str]: """Set dates by start and end dates as a tuple, (start, end).""" if not isinstance(dates, tuple) or len(dates) != 2: raise InvalidInputType("dates", "tuple", "(start, end)") start = pd.to_datetime(dates[0]) end = pd.to_datetime(dates[1]) if self.time_scale == "monthly": start = start.replace(day=14) end = end.replace(day=17) if self.time_scale == "annual": start = start.replace(day=6) end = end.replace(day=8) if start < self.valid_start or end > self.valid_end: raise InvalidInputRange(self._invalid_yr) return { "start": start.strftime(DATE_FMT), "end": end.strftime(DATE_FMT), } def years_todict(self, years: Union[List[int], int]) -> Dict[str, str]: """Set date by list of year(s).""" years = [years] if isinstance(years, int) else years if min(years) < self.valid_start.year or max(years) > self.valid_end.year: raise InvalidInputRange(self._invalid_yr) return {"years": ",".join(str(y) for y in years)} def dates_tolist(self, dates: Tuple[str, str]) -> List[Tuple[pd.Timestamp, pd.Timestamp]]: """Correct dates for Daymet accounting for leap years. Daymet doesn't account for leap years and removes Dec 31 when it's leap year. Parameters ---------- dates : tuple Target start and end dates. Returns ------- list All the dates in the Daymet database within the provided date range. """ date_dict = self.dates_todict(dates) start = pd.to_datetime(date_dict["start"]) + pd.DateOffset(hour=12) end = pd.to_datetime(date_dict["end"]) + pd.DateOffset(hour=12) period =
baud <= 2400: deviatn = 5100 elif baud <= 38400: deviatn = 20000 * (old_div((baud-2400),36000)) else: deviatn = 129000 * (old_div((baud-38400),211600)) self.setMdmDeviatn(deviatn) def calculatePktChanBW(self, mhz=24, radiocfg=None): ''' calculates the optimal ChanBW setting for the current freq/baud * totally experimental * from Smart RF Studio: 1.2 kbaud BW: 63khz 2.4 kbaud BW: 63khz 38.4kbaud BW: 94khz 250 kbaud BW: 600khz ''' freq, freqhex = self.getFreq() center_freq = freq + 14000000 freq_uncertainty = 20e-6 * freq # +-20ppm freq_uncertainty *= 2 # both xmitter and receiver #minbw = (2 * freq_uncertainty) + self.getMdmDRate() # uncertainty for both sender/receiver minbw = (self.getMdmDRate() + freq_uncertainty) possibles = [ 53e3,63e3,75e3,93e3,107e3,125e3,150e3,188e3,214e3,250e3,300e3,375e3,428e3,500e3,600e3,750e3, ] for bw in possibles: #if (.8 * bw) > minbw: # can't occupy more the 80% of BW if (bw) > minbw: break self.setMdmChanBW(bw, mhz, radiocfg) def calculateFsIF(self, mhz=24, radiocfg=None): ''' calculates the optimal IF setting for the current freq/baud * totally experimental * 1.2 kbaud IF: 140khz 2.4 kbaud IF: 140khz 38.4kbaud IF: 164khz (140khz for "sensitive" version) 250 kbaud IF: 281khz 500 kbaud IF: 328khz ''' pass def calculateFsOffset(self, mhz=24, radiocfg=None): ''' calculates the optimal FreqOffset setting for the current freq/baud * totally experimental * ''' pass def getRSSI(self): rssi = self.peek(RSSI) return rssi def getLQI(self): lqi = self.peek(LQI) return lqi def setAESmode(self, aesmode=AES_CRYPTO_DEFAULT): ''' set AES crypto co-processor mode. crypto operations on inbound and outbound RF packets are independently supported as is the type of operation. normally this would be ENCRYPT on outbound and DECRYPT on inbound. aesmode is a bitfield. 
the upper half mirrors the CC1111 standard modes (CBC, ECB etc.), and the lower half flags whether to encrypt or not on inbound/outbound as well as which operation to perform: aesmode[7:4] ENCCS_MODE... aesmode[3] OUTBOUND 0 == OFF, 1 == ON aesmode[2] OUTBOUND 0 == Decrypt, 1 == Encrypt aesmode[1] INBOUND 0 == OFF, 1 == ON aesmode[0] INBOUND 0 == Decrypt, 1 == Encrypt the following are defined in chipcondefs. valid CC1111 modes are: ENCCS_MODE_CBC ENCCS_MODE_CBCMAC ENCCS_MODE_CFB ENCCS_MODE_CTR ENCCS_MODE_ECB ENCCS_MODE_OFB valid AES operational modes are: AES_CRYPTO_IN_ON AES_CRYPTO_IN_OFF AES_CRYPTO_IN_ENCRYPT AES_CRYPTO_IN_DECRYPT AES_CRYPTO_OUT_ON AES_CRYPTO_OUT_OFF AES_CRYPTO_OUT_ENCRYPT AES_CRYPTO_OUT_DECRYPT aesmode is made up of the appropriate combination of the above. default is CBC mode, crypto enabled IN and OUT: (ENCCS_MODE_CBC | AES_CRYPTO_OUT_ON | AES_CRYPTO_OUT_ENCRYPT | AES_CRYPTO_IN_ON | AES_CRYPTO_IN_DECRYPT) ''' return self.send(APP_NIC, NIC_SET_AES_MODE, "%c"%aesmode) def getAESmode(self): ''' get the currently set AES co-processor mode ''' return self.send(APP_NIC, NIC_GET_AES_MODE, "") def setAESiv(self, iv= '\0'*16): ''' set the AES IV. this will persist until the next reboot, but it should be noted that some modes update the IV automatically with each operation, so care must be taken with the higher level protocol to ensure lost packets etc. do not cause synchronisation problems. IV must be 128 bits. ''' return self.send(APP_NIC, NIC_SET_AES_IV, iv) def setAESkey(self, key= '\0'*16): ''' set the AES key. this will persist until the next reboot. key must be 128 bits. 
''' return self.send(APP_NIC, NIC_SET_AES_KEY, key) def setAmpMode(self, ampmode=0): ''' set the amplifier mode (RF amp external to CC1111) ''' return self.send(APP_NIC, NIC_SET_AMP_MODE, "%c"%ampmode) def getAmpMode(self): ''' get the amplifier mode (RF amp external to CC1111) ''' return self.send(APP_NIC, NIC_GET_AMP_MODE, "") def setPktAddr(self, addr): return self.poke(ADDR, correctbytes(addr)) def getPktAddr(self): return self.peek(ADDR) def setEnDeCoder(self, endec=None): self.endec = endec ##### RADIO XMIT/RECV and UTILITY FUNCTIONS ##### # set repeat & offset to optionally repeat tx of a section of the data block. repeat of 65535 means 'forever' def RFxmit(self, data, repeat=0, offset=0): # encode, if necessary if self.endec is not None: data = self.endec.encode(data) if len(data) > RF_MAX_TX_BLOCK: if repeat or offset: return PY_TX_BLOCKSIZE_INCOMPAT return self.RFxmitLong(data, doencoding=False) # calculate wait time waitlen = len(data) waitlen += repeat * (len(data) - offset) wait = USB_TX_WAIT * ((old_div(waitlen, RF_MAX_TX_BLOCK)) + 1) self.send(APP_NIC, NIC_XMIT, "%s" % struct.pack("<HHH",len(data),repeat,offset)+data, wait=wait) def RFxmitLong(self, data, doencoding=True): # encode, if necessary if self.endec is not None and doencoding: data = self.endec.encode(data) if len(data) > RF_MAX_TX_LONG: return PY_TX_BLOCKSIZE_TOO_LARGE datalen = len(data) # calculate wait time waitlen = len(data) wait = USB_TX_WAIT * ((old_div(waitlen, RF_MAX_TX_BLOCK)) + 1) # load chunk buffers chunks = [] for x in range(old_div(datalen, RF_MAX_TX_CHUNK)): chunks.append(data[x * RF_MAX_TX_CHUNK:(x + 1) * RF_MAX_TX_CHUNK]) if datalen % RF_MAX_TX_CHUNK: chunks.append(data[-(datalen % RF_MAX_TX_CHUNK):]) preload = old_div(RF_MAX_TX_BLOCK, RF_MAX_TX_CHUNK) retval, ts = self.send(APP_NIC, NIC_XMIT_LONG, "%s" % struct.pack("<HB",datalen,preload)+data[:RF_MAX_TX_CHUNK * preload], wait=wait*preload) #sys.stderr.write('=' + repr(retval)) error = struct.unpack("<B", retval[0])[0] 
if error: return error chlen = len(chunks) for chidx in range(preload, chlen): chunk = chunks[chidx] error = RC_TEMP_ERR_BUFFER_NOT_AVAILABLE while error == RC_TEMP_ERR_BUFFER_NOT_AVAILABLE: retval,ts = self.send(APP_NIC, NIC_XMIT_LONG_MORE, "%s" % struct.pack("B", len(chunk))+chunk, wait=wait) error = struct.unpack("<B", retval[0])[0] if error: return error #if error == RC_TEMP_ERR_BUFFER_NOT_AVAILABLE: # sys.stderr.write('.') #sys.stderr.write('+') # tell dongle we've finished retval,ts = self.send(APP_NIC, NIC_XMIT_LONG_MORE, "%s" % struct.pack("B", 0), wait=wait) return struct.unpack("<b", retval[0])[0] def RFtestLong(self, data="BLAHabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZblahaBcDeFgHiJkLmNoPqRsTuVwXyZBLahAbCdEfGhIjKlMnOpQrStUvWxYz"): datalen = len(data) chunks = [] while len(data): chunks.append(data[:RF_MAX_TX_CHUNK]) data = data[RF_MAX_TX_CHUNK:] retval, ts = self.send(APP_NIC, NIC_XMIT_LONG, "%s" % struct.pack("<H",datalen)+chunks[0], wait=1000) sys.stderr.write('=' + repr(retval)) # set blocksize to larger than 255 to receive large blocks or 0 to revert to normal def RFrecv(self, timeout=USB_RX_WAIT, blocksize=None): if not blocksize == None: if blocksize > EP5OUT_BUFFER_SIZE: raise Exception("Blocksize too large. Maximum %d" % EP5OUT_BUFFER_SIZE) self.send(APP_NIC, NIC_SET_RECV_LARGE, "%s" % struct.pack("<H",blocksize)) data = self.recv(APP_NIC, NIC_RECV, timeout) # decode, if necessary if self.endec is not None: # strip off timestamp, process data, then reapply timestamp to continue msg, ts = data msg = self.endec.decode(msg) data = msg, ts return data def RFlisten(self): ''' just sit and dump packets as they come in kinda like discover() but without changing any of the communications settings ''' print("Entering RFlisten mode... 
packets arriving will be displayed on the screen") print("(press Enter to stop)") while not keystop(): try: y, t = self.RFrecv() print("(%5.3f) Received: %s | %s" % (t, y.encode('hex'), makeFriendlyAscii(y))) except ChipconUsbTimeoutException: pass except KeyboardInterrupt: print("Please press <enter> to stop") sys.stdin.read(1) def RFcapture(self): ''' dump packets as they come in, but return a list of packets when you exit capture mode. kinda like discover() but without changing any of the communications settings ''' capture = [] print("Entering RFlisten mode... packets arriving will be displayed on the screen (and returned in a list)") print("(press Enter to stop)") while not keystop(): try: y, t = self.RFrecv() #print "(%5.3f) Received: %s" % (t, y.encode('hex')) print("(%5.3f) Received: %s | %s" % (t, y.encode('hex'), makeFriendlyAscii(y))) capture.append((y,t)) except ChipconUsbTimeoutException: pass except KeyboardInterrupt: print("Please press <enter> to stop") sys.stdin.read(1) return capture def discover(self, lowball=1, debug=None, length=30, IdentSyncWord=False, ISWsensitivity=4, ISWminpreamble=2, SyncWordMatchList=None, Search=None, RegExpSearch=None): ''' discover() sets lowball mode to the mode requested (length too), and begins to dump packets to the screen. press <enter> to quit, and your radio config will be set back to its original configuration. lowball - lowball level of choosing (see help on lowball) debug - sets _debug to this setting if not None. length - arbitrary length of bytes we want to see per pseudopacket. 
(should be enough to identify interesting packets, but not too long) IdentSyncWord - look for preamble in each packet and determine possible sync-words in use SyncWordMatchList - attempt to find *these* sync words (provide a list) Search - byte string to search through each received packet for (real bytes, not hex repr) RegExpSearch - regular expression to search through received bytes (not the hex repr that is printed) if IdentSyncWord == True (or SyncWordMatchList != None), returns a dict of unique possible SyncWords identified along with the number of times seen. ''' retval = {} oldebug = self._debug if SyncWordMatchList != None: IdentSyncWord = True if IdentSyncWord: if lowball <= 1: print("Entering Discover mode and searching for possible SyncWords...") if SyncWordMatchList != None: print(" seeking one of: %s" % repr([hex(x) for x in SyncWordMatchList])) else: print("-- lowball too high -- ignoring request to IdentSyncWord") print("Entering Discover mode...") IdentSyncWord = False self.lowball(level=lowball, length=length) if debug is not None: self._debug
# coding=utf-8 # Copyright 2019 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Download manager interface.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import uuid from absl import logging import promise import six import tensorflow as tf from tensorflow_datasets.core import api_utils from tensorflow_datasets.core import constants from tensorflow_datasets.core import utils from tensorflow_datasets.core.download import downloader from tensorflow_datasets.core.download import extractor from tensorflow_datasets.core.download import resource as resource_lib from tensorflow_datasets.core.download import util class NonMatchingChecksumError(Exception): """The downloaded file doesn't have expected checksum.""" def __init__(self, url, tmp_path): msg = 'Artifact %s, downloaded to %s, has wrong checksum.' % (url, tmp_path) Exception.__init__(self, msg) class DownloadConfig(object): """Configuration for `tfds.core.DatasetBuilder.download_and_prepare`.""" def __init__(self, extract_dir=None, manual_dir=None, download_mode=None, compute_stats=None, max_examples_per_split=None): """Constructs a `DownloadConfig`. Args: extract_dir: `str`, directory where extracted files are stored. Defaults to "<download_dir>/extracted". manual_dir: `str`, read-only directory where manually downloaded/extracted data is stored. Defaults to "<download_dir>/manual". 
download_mode: `tfds.GenerateMode`, how to deal with downloads or data that already exists. Defaults to `REUSE_DATASET_IF_EXISTS`, which will reuse both downloads and data if it already exists. compute_stats: `tfds.download.ComputeStats`, whether to compute statistics over the generated data. Defaults to `AUTO`. max_examples_per_split: `int`, optional max number of examples to write into each split. """ self.extract_dir = extract_dir self.manual_dir = manual_dir self.download_mode = util.GenerateMode( download_mode or util.GenerateMode.REUSE_DATASET_IF_EXISTS) self.compute_stats = util.ComputeStatsMode( compute_stats or util.ComputeStatsMode.AUTO) self.max_examples_per_split = max_examples_per_split class DownloadManager(object): """Manages the download and extraction of files, as well as caching. Downloaded files are cached under `download_dir`. The file name of downloaded files follows pattern "${sanitized_url}${content_checksum}.${ext}". Eg: 'cs.toronto.edu_kriz_cifar-100-pythonJDF[...]I.tar.gz'. While a file is being downloaded, it is placed into a directory following a similar but different pattern: "%{sanitized_url}${url_checksum}.tmp.${uuid}". When a file is downloaded, a "%{fname}s.INFO.json" file is created next to it. This INFO file contains the following information: {"dataset_names": ["name1", "name2"], "urls": ["http://url.of/downloaded_file"]} Extracted files/dirs are stored under `extract_dir`. The file name or directory name is the same as the original name, prefixed with the extraction method. E.g. "${extract_dir}/TAR_GZ.cs.toronto.edu_kriz_cifar-100-pythonJDF[...]I.tar.gz". The function members accept either plain value, or values wrapped into list or dict. Giving a data structure will parallelize the downloads. 
Example of usage: ``` # Sequential download: str -> str train_dir = dl_manager.download_and_extract('https://abc.org/train.tar.gz') test_dir = dl_manager.download_and_extract('https://abc.org/test.tar.gz') # Parallel download: list -> list image_files = dl_manager.download( ['https://a.org/1.jpg', 'https://a.org/2.jpg', ...]) # Parallel download: dict -> dict data_dirs = dl_manager.download_and_extract({ 'train': 'https://abc.org/train.zip', 'test': 'https://abc.org/test.zip', }) data_dirs['train'] data_dirs['test'] ``` For more customization on the download/extraction (ex: passwords, output_name, ...), you can pass a `tfds.download.Resource` as argument. """ @api_utils.disallow_positional_args def __init__(self, download_dir, extract_dir=None, manual_dir=None, dataset_name=None, checksums=None, force_download=False, force_extraction=False): """Download manager constructor. Args: download_dir: `str`, path to directory where downloads are stored. extract_dir: `str`, path to directory where artifacts are extracted. manual_dir: `str`, path to manually downloaded/extracted data directory. dataset_name: `str`, name of dataset this instance will be used for. If provided, downloads will contain which datasets they were used for. checksums: `dict<str url, str sha256>`, url to sha256 of resource. Only URLs present are checked. If empty, checksum of (already) downloaded files is computed and can then be retrieved using `recorded_download_checksums` property. force_download: `bool`, default to False. If True, always [re]download. force_extraction: `bool`, default to False. If True, always [re]extract. 
""" self._dataset_name = dataset_name self._checksums = checksums or {} self._record_checksum_size = not checksums self._recorded_download_checksums = {} self._download_sizes = {} self._download_dir = os.path.expanduser(download_dir) self._extract_dir = os.path.expanduser( extract_dir or os.path.join(download_dir, 'extracted')) self._manual_dir = manual_dir and os.path.expanduser(manual_dir) tf.io.gfile.makedirs(self._download_dir) tf.io.gfile.makedirs(self._extract_dir) self._force_download = force_download self._force_extraction = force_extraction self._extractor = extractor.get_extractor() self._downloader = downloader.get_downloader() @property def recorded_download_checksums(self): """Returns checksums for downloaded urls.""" return dict(self._recorded_download_checksums) @property def download_sizes(self): """Returns sizes (in bytes) for downloaded urls.""" return dict(self._download_sizes) def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size): """Store dled file to definitive place, write INFO file, return path.""" fnames = tf.io.gfile.listdir(tmp_dir_path) if len(fnames) > 1: raise AssertionError('More than one file in %s.' % tmp_dir_path) original_fname = fnames[0] tmp_path = os.path.join(tmp_dir_path, original_fname) if self._record_checksum_size: resource.sha256 = sha256 self._download_sizes[resource.url] = dl_size self._recorded_download_checksums[resource.url] = sha256 elif self._checksums[resource.url] != sha256: raise NonMatchingChecksumError(resource.url, tmp_path) resource.write_info_file(self._dataset_name, original_fname) # Unconditionally overwrite because either file doesn't exist or # FORCE_DOWNLOAD=true tf.io.gfile.rename(tmp_path, resource.path, overwrite=True) tf.io.gfile.rmtree(tmp_dir_path) return resource.path # synchronize and memoize decorators ensure same resource will only be # processed once, even if passed twice to download_manager. 
@util.build_synchronize_decorator() @utils.memoize() def _download(self, resource): """Download resource, returns Promise->path to downloaded file.""" if isinstance(resource, six.string_types): resource = resource_lib.Resource(url=resource) resource.sha256 = self._checksums.get(resource.url, None) if not resource.path: resource.path = os.path.join(self._download_dir, resource.fname) if (not self._force_download and resource.sha256 and resource.exists_locally()): logging.info('URL %s already downloaded: reusing %s.', resource.url, resource.path) self._recorded_download_checksums[resource.url] = resource.sha256 self._download_sizes[resource.url] = ( tf.io.gfile.stat(resource.path).length) return promise.Promise.resolve(resource.path) # There is a slight difference between downloader and extractor here: # the extractor manages its own temp directory, while the DownloadManager # manages the temp directory of downloader. tmp_dir_path = '%s.tmp.%s' % (resource.path, uuid.uuid4().hex) tf.io.gfile.makedirs(tmp_dir_path) logging.info('Downloading %s into %s...', resource.url, tmp_dir_path) def callback(val): checksum, dl_size = val return self._handle_download_result(resource, tmp_dir_path, checksum, dl_size) return self._downloader.download(resource, tmp_dir_path).then(callback) @util.build_synchronize_decorator() @utils.memoize() def _extract(self, resource): """Extract a single archive, returns Promise->path to extraction result.""" if isinstance(resource, six.string_types): resource = resource_lib.Resource(path=resource) if resource.extract_method == resource_lib.ExtractMethod.NO_EXTRACT: logging.info( 'Skipping extraction for %s (method=NO_EXTRACT).', resource.path) return promise.Promise.resolve(resource.path) extract_path = os.path.join(self._extract_dir, resource.extract_fname) if not self._force_extraction and tf.io.gfile.exists(extract_path): logging.info('Reusing extraction of %s at %s.', resource.path, extract_path) return promise.Promise.resolve(extract_path) 
return self._extractor.extract(resource, extract_path) @util.build_synchronize_decorator() @utils.memoize() def _download_extract(self, resource): """Download-extract `Resource` or url, returns Promise->path.""" if isinstance(resource, six.string_types): resource = resource_lib.Resource(url=resource) def callback(path): resource.path = path return self._extract(resource) return self._download(resource).then(callback) def download_kaggle_data(self, competition_name): """Download data for a given Kaggle competition.""" with self._downloader.tqdm(): kaggle_downloader = self._downloader.kaggle_downloader(competition_name) urls = kaggle_downloader.competition_urls files = kaggle_downloader.competition_files return _map_promise(self._download, dict((f, u) for (f, u) in zip(files, urls))) def download(self, url_or_urls): """Download given url(s). Args: url_or_urls: url or `list`/`dict` of urls to download and extract. Each url can be a `str` or `tfds.download.Resource`. Returns: downloaded_path(s): `str`, The downloaded paths matching the given input url_or_urls. """ # Add progress bar to follow the download state with self._downloader.tqdm(): return _map_promise(self._download, url_or_urls) def iter_archive(self, resource): """Returns iterator over files within archive. **Important Note**: caller should read files as they are yielded. Reading out of order is slow. Args: resource: path to archive or `tfds.download.Resource`. Returns: Generator yielding tuple (path_within_archive, file_obj). """ if isinstance(resource, six.string_types): resource = resource_lib.Resource(path=resource) return extractor.iter_archive(resource.path, resource.extract_method) def extract(self, path_or_paths): """Extract given path(s). Args: path_or_paths: path or `list`/`dict` of path of file to extract. Each path can be a `str` or `tfds.download.Resource`. If not explicitly specified in `Resource`, the extraction method is deduced from downloaded file name. 
Returns: extracted_path(s): `str`, The extracted paths matching the given input path_or_paths. """ # Add progress bar to follow the download state with self._extractor.tqdm(): return _map_promise(self._extract, path_or_paths) def download_and_extract(self, url_or_urls): """Download and extract given url_or_urls. Is roughly equivalent to: ``` extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls: url or `list`/`dict` of urls to download and extract. Each url can be a `str` or `tfds.download.Resource`. If not explicitly specified in `Resource`, the extraction method will automatically be deduced from downloaded file name. Returns: extracted_path(s): `str`, extracted paths of given URL(s). """ # Add progress bar to follow the download state with self._downloader.tqdm(): with self._extractor.tqdm(): return _map_promise(self._download_extract, url_or_urls) @property def manual_dir(self): """Returns the directory containing the manually extracted data.""" if not tf.io.gfile.exists(self._manual_dir): raise AssertionError( 'Manual directory {} does not exist. Create it and download/extract ' 'dataset artifacts in there.'.format(self._manual_dir)) return self._manual_dir # ============================================================================ # In Python 2.X, threading.Condition.wait() cannot be interrupted by SIGINT, # unless it's given a timeout. Here we artificially give a long timeout to # allow ctrl+C. # This code should be deleted once python2 is no longer supported. if sys.version_info[0] > 2: def _wait_on_promise(p): return p.get() else: def _wait_on_promise(p): while True: result = p.get(sys.maxint)
param[2] prime = param[5] fp2 = sidh_fp2.sidh_fp2(prime) error_computation = False # Fixed test tests_already_performed = 0 fixed_tests = [1, prime-1] for test_value_1 in fixed_tests: for test_value_1i in fixed_tests: for test_value_2 in fixed_tests: for test_value_2i in fixed_tests: for test_value_3 in fixed_tests: for test_value_3i in fixed_tests: for test_value_4 in fixed_tests: for test_value_4i in fixed_tests: for test_value_5 in fixed_tests: for test_value_5i in fixed_tests: values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i, test_value_3, test_value_3i, test_value_4, test_value_4i, test_value_5, test_value_5i] expected_value_1, expected_value_2 = SIDH_round2_spec.eval_4_isog(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i]), fp2([test_value_3, test_value_3i]), fp2([test_value_4, test_value_4i]), fp2([test_value_5, test_value_5i])) expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_eval_4_isog_test, debug_mode) tests_already_performed += 1 if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break # Random tests if(not error_computation): for i in range(tests_already_performed, number_of_tests): if(((i %(1000)) == 0)): print(i) test_value_1 = random.randint(1, prime) test_value_1i = random.randint(1, prime) test_value_2 = random.randint(1, prime) test_value_2i = random.randint(1, prime) test_value_3 = random.randint(1, prime) test_value_3i = random.randint(1, prime) test_value_4 = random.randint(1, prime) test_value_4i = 
random.randint(1, prime) test_value_5 = random.randint(1, prime) test_value_5i = random.randint(1, prime) values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i, test_value_3, test_value_3i, test_value_4, test_value_4i, test_value_5, test_value_5i] expected_value_1, expected_value_2 = SIDH_round2_spec.eval_4_isog(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i]), fp2([test_value_3, test_value_3i]), fp2([test_value_4, test_value_4i]), fp2([test_value_5, test_value_5i])) expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_eval_4_isog_test, debug_mode) if(error_computation): break return error_computation def test_all_sidh_function_eval_4_isog(zedboard, sike_fpga_constants, number_of_tests, only_one_parameter=None): error_computation = False if(only_one_parameter != None): all_testing_parameters = sike_fpga_constants[only_one_parameter:only_one_parameter+1] else: all_testing_parameters = sike_fpga_constants for param in all_testing_parameters: print("Testing SIDH function eval 4 isog " + param[0]) error_computation = test_sidh_function_eval_4_isog(zedboard, param, number_of_tests, debug_mode=False) if(error_computation): break def test_sidh_function_xtple(zedboard, param, number_of_tests, debug_mode=False): load_constants(zedboard, param) number_of_words = param[4] base_word_size = param[1] extended_word_size = param[2] prime = param[5] fp2 = sidh_fp2.sidh_fp2(prime) error_computation = False # Fixed test tests_already_performed = 0 fixed_tests = [1, prime-1] test_value_1 = 100 for test_value_2 in fixed_tests: for test_value_2i in fixed_tests: for test_value_3 in fixed_tests: for test_value_3i in fixed_tests: for test_value_4 in fixed_tests: for test_value_4i in 
fixed_tests: for test_value_5 in fixed_tests: for test_value_5i in fixed_tests: values_to_load = [test_value_2, test_value_2i, test_value_3, test_value_3i, test_value_4, test_value_4i, test_value_5, test_value_5i, test_value_1] expected_value_1, expected_value_2 = SIDH_round2_spec.xTPLe(fp2, fp2([test_value_2, test_value_2i]), fp2([test_value_3, test_value_3i]), fp2([test_value_5, test_value_5i]), fp2([test_value_4, test_value_4i]), test_value_1) expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_xTPLe_test, debug_mode) tests_already_performed += 1 if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break # Random tests if(not error_computation): test_value_1 = 100 for i in range(tests_already_performed, number_of_tests): if(((i %(1000)) == 0)): print(i) test_value_2 = random.randint(1, prime) test_value_2i = random.randint(1, prime) test_value_3 = random.randint(1, prime) test_value_3i = random.randint(1, prime) test_value_4 = random.randint(1, prime) test_value_4i = random.randint(1, prime) test_value_5 = random.randint(1, prime) test_value_5i = random.randint(1, prime) values_to_load = [test_value_2, test_value_2i, test_value_3, test_value_3i, test_value_4, test_value_4i, test_value_5, test_value_5i, test_value_1] expected_value_1, expected_value_2 = SIDH_round2_spec.xTPLe(fp2, fp2([test_value_2, test_value_2i]), fp2([test_value_3, test_value_3i]), fp2([test_value_5, test_value_5i]), fp2([test_value_4, test_value_4i]), test_value_1) expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], 
expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_xTPLe_test, debug_mode) if(error_computation): break return error_computation def test_all_sidh_function_xtple(zedboard, sike_fpga_constants, number_of_tests, only_one_parameter=None): error_computation = False if(only_one_parameter != None): all_testing_parameters = sike_fpga_constants[only_one_parameter:only_one_parameter+1] else: all_testing_parameters = sike_fpga_constants for param in all_testing_parameters: print("Testing SIDH function xtple " + param[0]) error_computation = test_sidh_function_xtple(zedboard, param, number_of_tests, debug_mode=False) if(error_computation): break def test_sidh_function_get_3_isog(zedboard, param, number_of_tests, debug_mode=False): load_constants(zedboard, param) number_of_words = param[4] base_word_size = param[1] extended_word_size = param[2] prime = param[5] fp2 = sidh_fp2.sidh_fp2(prime) error_computation = False # Fixed test tests_already_performed = 0 fixed_tests = [1, prime-1] for test_value_1 in fixed_tests: for test_value_1i in fixed_tests: for test_value_2 in fixed_tests: for test_value_2i in fixed_tests: values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i] expected_value_1, expected_value_2, expected_value_3, expected_value_4 = SIDH_round2_spec.get_3_isog(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i])) expected_output = [expected_value_2.polynomial()[0], expected_value_2.polynomial()[1], expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_3.polynomial()[0], expected_value_3.polynomial()[1], expected_value_4.polynomial()[0], expected_value_4.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, 
test_program_start_get_3_isog_test, debug_mode) tests_already_performed += 1 if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break # Random tests if(not error_computation): for i in range(tests_already_performed, number_of_tests): if(((i %(1000)) == 0)): print(i) test_value_1 = random.randint(1, prime) test_value_1i = random.randint(1, prime) test_value_2 = random.randint(1, prime) test_value_2i = random.randint(1, prime) values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i] expected_value_1, expected_value_2, expected_value_3, expected_value_4 = SIDH_round2_spec.get_3_isog(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i])) expected_output = [expected_value_2.polynomial()[0], expected_value_2.polynomial()[1], expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_3.polynomial()[0], expected_value_3.polynomial()[1], expected_value_4.polynomial()[0], expected_value_4.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_get_3_isog_test, debug_mode) if(error_computation): break return error_computation def test_all_sidh_function_get_3_isog(zedboard, sike_fpga_constants, number_of_tests, only_one_parameter=None): error_computation = False if(only_one_parameter != None): all_testing_parameters = sike_fpga_constants[only_one_parameter:only_one_parameter+1] else: all_testing_parameters = sike_fpga_constants for param in all_testing_parameters: print("Testing SIDH function get 3 isog " + param[0]) error_computation = test_sidh_function_get_3_isog(zedboard, param, number_of_tests, debug_mode=False) if(error_computation): break def test_sidh_function_eval_3_isog(zedboard, param, number_of_tests, debug_mode=False): load_constants(zedboard, param) number_of_words = param[4] base_word_size = param[1] extended_word_size = 
param[2] prime = param[5] fp2 = sidh_fp2.sidh_fp2(prime) error_computation = False # Fixed test tests_already_performed = 0 fixed_tests = [1, prime-1] for test_value_1 in fixed_tests: for test_value_1i in fixed_tests: for test_value_2 in fixed_tests: for test_value_2i in fixed_tests: for test_value_3 in fixed_tests: for test_value_3i in fixed_tests: for test_value_4 in fixed_tests: for test_value_4i in fixed_tests: values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i, test_value_3, test_value_3i, test_value_4, test_value_4i] expected_value_1, expected_value_2 = SIDH_round2_spec.eval_3_isog(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i]), fp2([test_value_3, test_value_3i]), fp2([test_value_4, test_value_4i])) expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_eval_3_isog_test, debug_mode) tests_already_performed += 1 if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break # Random tests if(not error_computation): for i in range(tests_already_performed, number_of_tests): if(((i %(1000)) == 0)): print(i) test_value_1 = random.randint(1, prime) test_value_1i = random.randint(1, prime) test_value_2 = random.randint(1, prime) test_value_2i = random.randint(1, prime) test_value_3 = random.randint(1, prime) test_value_3i = random.randint(1, prime) test_value_4 = random.randint(1, prime) test_value_4i = random.randint(1, prime) values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i, test_value_3, test_value_3i, test_value_4, test_value_4i] expected_value_1, 
expected_value_2 = SIDH_round2_spec.eval_3_isog(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i]), fp2([test_value_3, test_value_3i]), fp2([test_value_4, test_value_4i])) expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_eval_3_isog_test, debug_mode) if(error_computation): break return error_computation def test_all_sidh_function_eval_3_isog(zedboard, sike_fpga_constants, number_of_tests, only_one_parameter=None): error_computation = False if(only_one_parameter != None): all_testing_parameters = sike_fpga_constants[only_one_parameter:only_one_parameter+1] else: all_testing_parameters = sike_fpga_constants for param in all_testing_parameters: print("Testing SIDH function eval 3 isog " + param[0]) error_computation = test_sidh_function_eval_3_isog(zedboard, param, number_of_tests, debug_mode=False) if(error_computation): break def test_sidh_function_get_2_isog(zedboard, param, number_of_tests, debug_mode=False): load_constants(zedboard, param) number_of_words = param[4] base_word_size = param[1] extended_word_size = param[2] prime = param[5] fp2 = sidh_fp2.sidh_fp2(prime) error_computation = False # Fixed test tests_already_performed = 0 fixed_tests = [1, prime-1] for test_value_1 in fixed_tests: for test_value_1i in fixed_tests: for test_value_2 in fixed_tests: for test_value_2i in fixed_tests: values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i] expected_value_1, expected_value_2 = SIDH_round2_spec.get_2_isog(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i])) expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]] error_computation = 
test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_get_2_isog_test, debug_mode) tests_already_performed += 1 if(error_computation): break if(error_computation): break if(error_computation): break if(error_computation): break # Random tests if(not error_computation): for i in range(tests_already_performed, number_of_tests): if(((i %(1000)) == 0)): print(i) test_value_1 = random.randint(1, prime) test_value_1i = random.randint(1, prime) test_value_2 = random.randint(1, prime) test_value_2i = random.randint(1, prime) values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i] expected_value_1, expected_value_2 = SIDH_round2_spec.get_2_isog(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i])) expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]] error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_get_2_isog_test, debug_mode) if(error_computation): break return error_computation def test_all_sidh_function_get_2_isog(zedboard, sike_fpga_constants, number_of_tests, only_one_parameter=None): error_computation = False if(only_one_parameter != None): all_testing_parameters = sike_fpga_constants[only_one_parameter:only_one_parameter+1] else: all_testing_parameters = sike_fpga_constants for param in all_testing_parameters: print("Testing SIDH function get 2 isog " + param[0]) error_computation = test_sidh_function_get_2_isog(zedboard, param, number_of_tests, debug_mode=False) if(error_computation): break def test_sidh_function_eval_2_isog(zedboard, param, number_of_tests, debug_mode=False): load_constants(zedboard, param) number_of_words = param[4] base_word_size = param[1] extended_word_size = param[2] prime = param[5] fp2 = 
sidh_fp2.sidh_fp2(prime) error_computation = False # Fixed test tests_already_performed = 0 fixed_tests = [1, prime-1] for test_value_1 in fixed_tests: for test_value_1i in fixed_tests: for test_value_2 in fixed_tests: for test_value_2i in fixed_tests: for test_value_3 in fixed_tests: for test_value_3i in fixed_tests: for test_value_4 in fixed_tests: for test_value_4i in
(72, 19, 49, 33), (38, 57, 64, 33), (61, 18, 44, 33), (75, 28, 83, 33), (46, 54, 80, 33), (84, 31, 53, 32), (78, 42, 83, 32), (66, 32, 38, 32), (57, 17, 44, 32), (62, 19, 43, 32), (83, 38, 48, 32), (71, 11, 68, 31), (56, 17, 44, 31), (72, 18, 49, 31), (37, 57, 64, 31), (41, 57, 57, 31), (72, 20, 47, 31), (38, 45, 48, 31), (83, 35, 48, 31), (69, 16, 49, 30), (81, 56, 67, 30), (83, 40, 48, 30), (72, 17, 49, 29), (79, 56, 57, 29), (65, 19, 43, 29), (32, 50, 68, 29), (46, 54, 82, 29), (37, 45, 48, 29), (84, 31, 78, 29), (83, 33, 48, 29), (67, 20, 43, 28), (68, 17, 82, 28), (47, 53, 44, 28), (49, 12, 52, 28), (35, 57, 64, 27), (69, 45, 38, 27), (32, 51, 68, 27), (41, 59, 57, 27), (38, 56, 57, 27), (37, 55, 57, 27), (36, 45, 48, 27), (84, 31, 79, 27), (71, 17, 47, 26), (34, 57, 64, 26), (86, 51, 70, 26), (32, 51, 70, 26), (41, 59, 56, 26), (61, 12, 46, 26), (72, 19, 46, 26), (68, 17, 83, 26), (47, 12, 52, 26), (48, 12, 52, 26), (84, 56, 67, 25), (83, 47, 49, 25), (46, 54, 84, 25), (72, 17, 47, 25), (35, 45, 48, 25), (46, 12, 52, 25), (33, 57, 64, 24), (86, 52, 70, 24), (73, 20, 44, 24), (59, 10, 46, 24), (41, 59, 54, 24), (61, 11, 46, 24), (84, 28, 79, 24), (47, 55, 44, 24), (45, 12, 52, 24), (46, 54, 85, 23), (86, 53, 70, 23), (73, 17, 47, 23), (45, 63, 53, 23), (61, 10, 46, 23), (67, 20, 40, 23), (72, 18, 45, 23), (34, 45, 48, 23), (84, 27, 79, 23), (70, 17, 83, 23), (32, 57, 64, 22), (83, 42, 84, 22), (44, 12, 52, 22), (65, 17, 41, 22), (32, 51, 74, 22), (73, 19, 44, 22), (33, 45, 48, 22), (76, 15, 78, 21), (84, 54, 74, 21), (84, 25, 79, 21), (47, 55, 42, 21), (72, 16, 83, 20), (41, 61, 53, 20), (32, 45, 48, 20), (72, 17, 44, 20), (73, 17, 83, 20), (36, 23, 81, 19), (49, 12, 46, 19), (42, 63, 53, 18), (73, 19, 41, 18), (72, 16, 44, 18), (86, 25, 79, 18), (47, 57, 42, 18), (48, 12, 46, 18), (86, 58, 69, 17), (72, 16, 43, 17), (47, 58, 42, 17), (47, 12, 46, 17), (72, 15, 43, 16), (71, 60, 85, 16), (73, 20, 39, 16), (73, 19, 40, 16), (75, 17, 83, 16), (47, 59, 42, 16), 
(32, 29, 84, 16), (46, 12, 46, 16), (85, 60, 71, 15), (83, 46, 41, 15), (45, 12, 46, 15), (76, 16, 83, 14), (47, 60, 42, 14), (44, 12, 46, 14), (47, 61, 42, 13), (47, 62, 41, 12), (47, 61, 41, 12), (43, 12, 46, 12), (32, 64, 68, 12), (42, 12, 46, 11), (77, 14, 83, 11), (86, 57, 49, 10), (70, 63, 38, 9), (77, 12, 83, 9), (34, 23, 90, 8), (77, 10, 83, 8), (75, 12, 88, 7), (34, 10, 79, 6), (39, 13, 42, 5), (32, 60, 85, 3), (32, 59, 85, 3), (32, 63, 85, 2), (75, 9, 88, -1), (47, 41, 37, -1), (87, 20, 51, -1), (87, 53, 70, -1), (83, 65, 60, -1), (45, 65, 53, -1), (31, 45, 48, -1), (32, 65, 85, -1), (87, 25, 79, -1), (49, 41, 37, -1), (50, 41, 37, -1), (73, 45, 37, -1), (31, 29, 84, -1), (51, 41, 37, -1), (52, 41, 37, -1), (76, 53, 36, -2), (88, 34, 73, -2), (75, 63, 36, -2), (30, 45, 48, -2), (88, 53, 70, -2), (48, 66, 48, -2), (48, 66, 50, -2), (54, 8, 46, -2), (48, 66, 51, -2), (55, 8, 46, -2), (47, 66, 53, -2), (57, 8, 46, -2), (59, 8, 46, -2), (67, 20, 36, -2), (77, 8, 83, -2), (72, 8, 44, -2), (89, 29, 62, -3), (89, 57, 49, -3), (77, 7, 83, -3), (89, 18, 53, -3), (50, 7, 46, -3), (84, 20, 35, -3), (48, 44, 35, -3), (52, 7, 46, -3), (89, 53, 70, -3), (53, 7, 46, -3), (67, 20, 35, -3), (31, 66, 85, -3), (89, 25, 79, -3), (73, 19, 35, -3), (51, 44, 35, -3), (53, 44, 35, -3), (51, 65, 36, -3), (54, 44, 35, -3), (58, 44, 35, -3), (59, 44, 35, -3), (89, 31, 78, -3), (46, 7, 90, -3), (46, 7, 89, -3), (63, 23, 34, -4), (90, 18, 76, -4), (30, 15, 36, -4), (56, 68, 70, -4), (90, 53, 70, -4), (44, 32, 94, -4), (66, 32, 34, -4), (73, 20, 34, -4), (30, 66, 85, -4), (90, 25, 79, -4), (67, 20, 34, -4), (72, 6, 38, -4), (51, 65, 35, -4), (72, 6, 42, -4), (59, 44, 34, -4), (28, 29, 84, -4), (83, 46, 34, -4), (46, 7, 91, -4), (70, 32, 34, -4), (36, 69, 54, -5), (66, 59, 33, -5), (91, 61, 52, -5), (36, 69, 77, -5), (27, 58, 48, -5), (66, 69, 78, -5), (91, 53, 70, -5), (63, 10, 95, -5), (27, 27, 69, -5), (27, 25, 90, -5), (27, 25, 89, -5), (91, 25, 79, -5), (27, 25, 88, -5), (27, 25, 
86, -5), (27, 25, 85, -5), (27, 25, 84, -5), (51, 65, 34, -5), (27, 26, 84, -5), (27, 27, 84, -5), (27, 29, 84, -5), (59, 44, 33, -5), (46, 7, 92, -5), (91, 38, 47, -5), (32, 21, 32, -6), (67, 70, 64, -6), (73, 32, 32, -6), (92, 53, 70, -6), (86, 33, 32, -6), (36, 14, 96, -6), (26, 52, 88, -6), (67, 25, 96, -6), (83, 69, 37, -6), (92, 25, 79, -6), (30, 68, 85, -6), (73, 23, 32, -6), (73, 25, 32, -6), (73, 28, 32, -6), (85, 33, 32, -6), (83, 32, 32, -6), (46, 7, 93, -6), (92, 38, 38, -6), (92, 38, 40, -6), (92, 38, 41, -6), (92, 38, 43, -6), (26, 52, 87, -6), (92, 38, 45, -6), (26, 52, 85, -6), (92, 38, 46, -6), (26, 52, 84, -6), (39, 71, 72, -7), (28, 12, 35, -7), (73, 32, 31, -7), (64, 71, 56, -7), (75, 27, 31, -7), (73, 25, 31, -7), (25, 26, 85, -7), (73, 30, 31, -7), (75, 31, 31, -7), (25, 44, 55, -7), (53, 62, 97, -7), (73, 45, 31, -7), (77, 24, 31, -7), (27, 25, 92, -7), (93, 25, 79, -7), (84, 32, 31, -7), (30, 69, 85, -7), (67, 22, 31, -7), (67, 26, 31, -7), (62, 32, 31, -7), (66, 32, 31, -7), (73, 28, 31, -7), (93, 37, 61, -7), (73, 27, 31, -7), (46, 7, 94, -7), (59, 40, 31, -7), (78, 32, 31, -7), (59, 39, 31, -7), (75, 32, 31, -7), (73, 42, 31, -7), (80, 32, 31, -7), (73, 31, 31, -7), (70, 32, 31, -7), (73, 41, 31, -7), (63, 32, 31, -7), (67, 32, 31, -7), (78, 72, 71, -8), (72, 20, 30, -8), (42, 66, 96, -8), (77, 28, 98, -8), (39, 65, 31, -8), (30, 51, 32, -8), (39, 22, 98, -8), (84, 32, 30, -8), (72, 6, 34, -8), (86, 58, 98, -8), (71, 7, 33, -8), (24, 10, 87, -8), (88, 70, 69, -8), (80, 29, 98, -8), (30, 70, 90, -8), (27, 25, 93, -8), (30, 70, 88, -8), (30, 70, 87, -8), (30, 70, 85, -8), (92, 38, 36, -8), (75, 41, 30, -8), (73, 20, 30, -8), (24, 50, 86, -8), (46, 7, 95, -8), (73, 41, 30, -8),
c.argument('hide_from_outlook_clients', arg_type=get_three_state_flag(), help='True if the group is not ' 'displayed in Outlook clients, such as Outlook for Windows and Outlook on the web; otherwise, ' 'false. Default value is false. Returned only on $select.', arg_group='Group') c.argument('is_subscribed_by_mail', arg_type=get_three_state_flag(), help='Indicates whether the signed-in ' 'user is subscribed to receive email conversations. Default value is true. Returned only on ' '$select.', arg_group='Group') c.argument('unseen_count', type=int, help='Count of conversations that have received new posts since the ' 'signed-in user last visited the group. Returned only on $select.', arg_group='Group') c.argument('group_is_archived', arg_type=get_three_state_flag(), help='', arg_group='Group') c.argument('app_role_assignments', action=AddAppRoleAssignments, nargs='+', help='', arg_group='Group') c.argument('created_on_behalf_of', action=AddCreatedOnBehalfOf, nargs='+', help='Represents an Azure Active ' 'Directory object. The directoryObject type is the base type for many other directory entity types.', arg_group='Group') c.argument('member_of', action=AddMemberOf, nargs='+', help='Groups that this group is a member of. HTTP ' 'Methods: GET (supported for all groups). Read-only. Nullable.', arg_group='Group') c.argument('microsoft_graph_group_members', action=AddMicrosoftGraphGroupMembers, nargs='+', help='Users and ' 'groups that are members of this group. HTTP Methods: GET (supported for all groups), POST ' '(supported for Microsoft 365 groups, security groups and mail-enabled security groups), DELETE ' '(supported for Microsoft 365 groups and security groups) Nullable.', arg_group='Group') c.argument('members_with_license_errors', action=AddMembersWithLicenseErrors, nargs='+', help='A list of group ' 'members with license errors from this group-based license assignment. 
Read-only.', arg_group='Group') c.argument('owners', action=AddOwners, nargs='+', help='The owners of the group. The owners are a set of ' 'non-admin users who are allowed to modify this object. Limited to 100 owners. HTTP Methods: GET ' '(supported for all groups), POST (supported for Microsoft 365 groups, security groups and ' 'mail-enabled security groups), DELETE (supported for Microsoft 365 groups and security groups). ' 'Nullable.', arg_group='Group') c.argument('settings', type=validate_file_or_dict, help='Read-only. Nullable. Expected value: ' 'json-string/@json-file.', arg_group='Group') c.argument('transitive_member_of', action=AddTransitiveMemberOf, nargs='+', help='', arg_group='Group') c.argument('transitive_members', action=AddTransitiveMembers, nargs='+', help='', arg_group='Group') c.argument('accepted_senders', action=AddAcceptedSenders, nargs='+', help='The list of users or groups that ' 'are allowed to create post\'s or calendar events in this group. If this list is non-empty then ' 'only users or groups listed here are allowed to post.', arg_group='Group') c.argument('calendar', type=validate_file_or_dict, help='calendar Expected value: json-string/@json-file.', arg_group='Group') c.argument('calendar_view', type=validate_file_or_dict, help='The calendar view for the calendar. Read-only. ' 'Expected value: json-string/@json-file.', arg_group='Group') c.argument('conversations', type=validate_file_or_dict, help='The group\'s conversations. Expected value: ' 'json-string/@json-file.', arg_group='Group') c.argument('events', type=validate_file_or_dict, help='The group\'s calendar events. Expected value: ' 'json-string/@json-file.', arg_group='Group') c.argument('photo', action=AddGroupsPhoto, nargs='+', help='profilePhoto', arg_group='Group') c.argument('photos', action=AddPhotos, nargs='+', help='The profile photos owned by the group. Read-only. 
' 'Nullable.', arg_group='Group') c.argument('rejected_senders', action=AddRejectedSenders, nargs='+', help='The list of users or groups that ' 'are not allowed to create posts or calendar events in this group. Nullable', arg_group='Group') c.argument('threads', type=validate_file_or_dict, help='The group\'s conversation threads. Nullable. Expected ' 'value: json-string/@json-file.', arg_group='Group') c.argument('drive', type=validate_file_or_dict, help='drive Expected value: json-string/@json-file.', arg_group='Group') c.argument('drives', type=validate_file_or_dict, help='The group\'s drives. Read-only. Expected value: ' 'json-string/@json-file.', arg_group='Group') c.argument('sites', type=validate_file_or_dict, help='The list of SharePoint sites in this group. Access the ' 'default site with /sites/root. Expected value: json-string/@json-file.', arg_group='Group') c.argument('extensions', action=AddExtensions, nargs='+', help='The collection of open extensions defined for ' 'the group. Read-only. Nullable.', arg_group='Group') c.argument('group_lifecycle_policies', action=AddGroupLifecyclePolicies, nargs='+', help='The collection of ' 'lifecycle policies for this group. Read-only. Nullable.', arg_group='Group') c.argument('planner', type=validate_file_or_dict, help='plannerGroup Expected value: json-string/@json-file.', arg_group='Group') c.argument('onenote', type=validate_file_or_dict, help='onenote Expected value: json-string/@json-file.', arg_group='Group') c.argument('team', type=validate_file_or_dict, help='team Expected value: json-string/@json-file.', arg_group='Group') c.argument('id2', type=str, help='Read-only.', arg_group='Schedule') c.argument('enabled', arg_type=get_three_state_flag(), help='Indicates whether the schedule is enabled for the ' 'team. 
Required.', arg_group='Schedule') c.argument('offer_shift_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether offer ' 'shift requests are enabled for the schedule.', arg_group='Schedule') c.argument('open_shifts_enabled', arg_type=get_three_state_flag(), help='Indicates whether open shifts are ' 'enabled for the schedule.', arg_group='Schedule') c.argument('provision_status', arg_type=get_enum_type(['NotStarted', 'Running', 'Completed', 'Failed']), help='', arg_group='Schedule') c.argument('provision_status_code', type=str, help='Additional information about why schedule provisioning ' 'failed.', arg_group='Schedule') c.argument('swap_shifts_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether swap ' 'shifts requests are enabled for the schedule.', arg_group='Schedule') c.argument('time_clock_enabled', arg_type=get_three_state_flag(), help='Indicates whether time clock is ' 'enabled for the schedule.', arg_group='Schedule') c.argument('time_off_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether time off ' 'requests are enabled for the schedule.', arg_group='Schedule') c.argument('time_zone', type=str, help='Indicates the time zone of the schedule team using tz database format. ' 'Required.', arg_group='Schedule') c.argument('workforce_integration_ids', nargs='+', help='', arg_group='Schedule') c.argument('offer_shift_requests', action=AddOfferShiftRequests, nargs='+', help='', arg_group='Schedule') c.argument('open_shift_change_requests', action=AddOpenShiftChangeRequests, nargs='+', help='', arg_group='Schedule') c.argument('open_shifts', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.', arg_group='Schedule') c.argument('scheduling_groups', action=AddSchedulingGroups, nargs='+', help='The logical grouping of users in ' 'the schedule (usually by role).', arg_group='Schedule') c.argument('shifts', type=validate_file_or_dict, help='The shifts in the schedule. 
Expected value: ' 'json-string/@json-file.', arg_group='Schedule') c.argument('swap_shifts_change_requests', action=AddSwapShiftsChangeRequests, nargs='+', help='', arg_group='Schedule') c.argument('time_off_reasons', action=AddTimeOffReasons, nargs='+', help='The set of reasons for a time off in ' 'the schedule.', arg_group='Schedule') c.argument('time_off_requests', action=AddTimeOffRequests, nargs='+', help='', arg_group='Schedule') c.argument('times_off', type=validate_file_or_dict, help='The instances of times off in the schedule. Expected ' 'value: json-string/@json-file.', arg_group='Schedule') with self.argument_context('teams team delete-team') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('if_match', type=str, help='ETag') with self.argument_context('teams team show-team') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('select', nargs='+', help='Select properties to be returned') c.argument('expand', nargs='+', help='Expand related entities') with self.argument_context('teams team archive') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('should_set_spo_site_read_only_for_members', arg_type=get_three_state_flag(), help='') with self.argument_context('teams team clone') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('display_name', type=str, help='') c.argument('description', type=str, help='') c.argument('mail_nickname', type=str, help='') c.argument('classification', type=str, help='') c.argument('visibility', arg_type=get_enum_type(['private', 'public', 'hiddenMembership', 'unknownFutureValue']), help='') c.argument('parts_to_clone', arg_type=get_enum_type(['apps', 'tabs', 'settings', 'channels', 'members']), help='') with self.argument_context('teams team create-channel') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('id_', options_list=['--id'], type=str, help='Read-only.') c.argument('description', type=str, 
help='Optional textual description for the channel.') c.argument('display_name', type=str, help='Channel name as it will appear to the user in Microsoft Teams.') c.argument('email', type=str, help='The email address for sending messages to the channel. Read-only.') c.argument('membership_type', arg_type=get_enum_type(['standard', 'private', 'unknownFutureValue']), help='') c.argument('web_url', type=str, help='A hyperlink that will navigate to the channel in Microsoft Teams. This ' 'is the URL that you get when you right-click a channel in Microsoft Teams and select Get link to ' 'channel. This URL should be treated as an opaque blob, and not parsed. Read-only.') c.argument('files_folder', type=validate_file_or_dict, help='driveItem Expected value: json-string/@json-file.') c.argument('members', action=AddTeamsMembers, nargs='+', help='') c.argument('messages', type=validate_file_or_dict, help='A collection of all the messages in the channel. A ' 'navigation property. Nullable. Expected value: json-string/@json-file.') c.argument('tabs', type=validate_file_or_dict, help='A collection of all the tabs in the channel. A navigation ' 'property. 
Expected value: json-string/@json-file.') with self.argument_context('teams team create-installed-app') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('id_', options_list=['--id'], type=str, help='Read-only.') c.argument('teams_app_definition', action=AddTeamsAppDefinition, nargs='+', help='teamsAppDefinition') c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App') c.argument('display_name', type=str, help='The name of the catalog app provided by the app developer in the ' 'Microsoft Teams zip app package.', arg_group='Teams App') c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded', 'unknownFutureValue']), help='', arg_group='Teams ' 'App') c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft ' 'Teams zip app package.', arg_group='Teams App') c.argument('app_definitions', action=AddAppDefinitions, nargs='+', help='The details for each version of the ' 'app.', arg_group='Teams App') with self.argument_context('teams team create-member') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('id_', options_list=['--id'], type=str, help='Read-only.') c.argument('display_name', type=str, help='The display name of the user.') c.argument('roles', nargs='+', help='The roles for that user.') with self.argument_context('teams team create-operation') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('id_', options_list=['--id'], type=str, help='Read-only.') c.argument('attempts_count', type=int, help='Number of times the operation was attempted before being marked ' 'successful or failed.') c.argument('created_date_time', help='Time when the operation was created.') c.argument('error', action=AddError, nargs='+', help='operationError') c.argument('last_action_date_time', help='Time when the async operation was last updated.') c.argument('operation_type', 
arg_type=get_enum_type(['invalid', 'cloneTeam', 'archiveTeam', 'unarchiveTeam', 'createTeam', 'unknownFutureValue']), help='') c.argument('status', arg_type=get_enum_type(['invalid', 'notStarted', 'inProgress', 'succeeded', 'failed', 'unknownFutureValue']), help='') c.argument('target_resource_id', type=str, help='The ID of the object that\'s created or modified as result of ' 'this async operation, typically a team.') c.argument('target_resource_location', type=str, help='The location of the object that\'s created or modified ' 'as result of this async operation. This URL should be treated as an opaque value and not parsed ' 'into its component paths.') with self.argument_context('teams team delete-channel') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('channel_id', type=str, help='key: id of channel') c.argument('if_match', type=str, help='ETag') with self.argument_context('teams team delete-installed-app') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation') c.argument('if_match', type=str, help='ETag') with self.argument_context('teams team delete-member') as c: c.argument('team_id', type=str, help='key: id of team') c.argument('conversation_member_id', type=str, help='key: id of conversationMember') c.argument('if_match', type=str, help='ETag') with self.argument_context('teams team delete-operation') as
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['NatIpArgs', 'NatIp'] @pulumi.input_type class NatIpArgs: def __init__(__self__, *, nat_gateway_id: pulumi.Input[str], dry_run: Optional[pulumi.Input[bool]] = None, nat_ip: Optional[pulumi.Input[str]] = None, nat_ip_cidr: Optional[pulumi.Input[str]] = None, nat_ip_cidr_id: Optional[pulumi.Input[str]] = None, nat_ip_description: Optional[pulumi.Input[str]] = None, nat_ip_name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a NatIp resource. :param pulumi.Input[str] nat_gateway_id: The ID of the Virtual Private Cloud (VPC) NAT gateway for which you want to create the NAT IP address. :param pulumi.Input[bool] dry_run: Specifies whether to check the validity of the request without actually making the request. :param pulumi.Input[str] nat_ip: The NAT IP address that you want to create. If you do not specify an IP address, the system selects a random IP address from the specified CIDR block. :param pulumi.Input[str] nat_ip_cidr: NAT IP ADDRESS of the address segment. :param pulumi.Input[str] nat_ip_cidr_id: The ID of the CIDR block to which the NAT IP address belongs. :param pulumi.Input[str] nat_ip_description: NAT IP ADDRESS description of information. Length is from `2` to `256` characters, must start with a letter or the Chinese at the beginning, but not at the` http://` Or `https://` at the beginning. :param pulumi.Input[str] nat_ip_name: NAT IP ADDRESS the name of the root directory. Length is from `2` to `128` characters, must start with a letter or the Chinese at the beginning can contain numbers, half a period (.), underscore (_) and dash (-). 
But do not start with `http://` or `https://` at the beginning. """ pulumi.set(__self__, "nat_gateway_id", nat_gateway_id) if dry_run is not None: pulumi.set(__self__, "dry_run", dry_run) if nat_ip is not None: pulumi.set(__self__, "nat_ip", nat_ip) if nat_ip_cidr is not None: pulumi.set(__self__, "nat_ip_cidr", nat_ip_cidr) if nat_ip_cidr_id is not None: pulumi.set(__self__, "nat_ip_cidr_id", nat_ip_cidr_id) if nat_ip_description is not None: pulumi.set(__self__, "nat_ip_description", nat_ip_description) if nat_ip_name is not None: pulumi.set(__self__, "nat_ip_name", nat_ip_name) @property @pulumi.getter(name="natGatewayId") def nat_gateway_id(self) -> pulumi.Input[str]: """ The ID of the Virtual Private Cloud (VPC) NAT gateway for which you want to create the NAT IP address. """ return pulumi.get(self, "nat_gateway_id") @nat_gateway_id.setter def nat_gateway_id(self, value: pulumi.Input[str]): pulumi.set(self, "nat_gateway_id", value) @property @pulumi.getter(name="dryRun") def dry_run(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether to check the validity of the request without actually making the request. """ return pulumi.get(self, "dry_run") @dry_run.setter def dry_run(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dry_run", value) @property @pulumi.getter(name="natIp") def nat_ip(self) -> Optional[pulumi.Input[str]]: """ The NAT IP address that you want to create. If you do not specify an IP address, the system selects a random IP address from the specified CIDR block. """ return pulumi.get(self, "nat_ip") @nat_ip.setter def nat_ip(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip", value) @property @pulumi.getter(name="natIpCidr") def nat_ip_cidr(self) -> Optional[pulumi.Input[str]]: """ NAT IP ADDRESS of the address segment. 
""" return pulumi.get(self, "nat_ip_cidr") @nat_ip_cidr.setter def nat_ip_cidr(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_cidr", value) @property @pulumi.getter(name="natIpCidrId") def nat_ip_cidr_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the CIDR block to which the NAT IP address belongs. """ return pulumi.get(self, "nat_ip_cidr_id") @nat_ip_cidr_id.setter def nat_ip_cidr_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_cidr_id", value) @property @pulumi.getter(name="natIpDescription") def nat_ip_description(self) -> Optional[pulumi.Input[str]]: """ NAT IP ADDRESS description of information. Length is from `2` to `256` characters, must start with a letter or the Chinese at the beginning, but not at the` http://` Or `https://` at the beginning. """ return pulumi.get(self, "nat_ip_description") @nat_ip_description.setter def nat_ip_description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_description", value) @property @pulumi.getter(name="natIpName") def nat_ip_name(self) -> Optional[pulumi.Input[str]]: """ NAT IP ADDRESS the name of the root directory. Length is from `2` to `128` characters, must start with a letter or the Chinese at the beginning can contain numbers, half a period (.), underscore (_) and dash (-). But do not start with `http://` or `https://` at the beginning. 
""" return pulumi.get(self, "nat_ip_name") @nat_ip_name.setter def nat_ip_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_name", value) @pulumi.input_type class _NatIpState: def __init__(__self__, *, dry_run: Optional[pulumi.Input[bool]] = None, nat_gateway_id: Optional[pulumi.Input[str]] = None, nat_ip: Optional[pulumi.Input[str]] = None, nat_ip_cidr: Optional[pulumi.Input[str]] = None, nat_ip_cidr_id: Optional[pulumi.Input[str]] = None, nat_ip_description: Optional[pulumi.Input[str]] = None, nat_ip_id: Optional[pulumi.Input[str]] = None, nat_ip_name: Optional[pulumi.Input[str]] = None, status: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering NatIp resources. :param pulumi.Input[bool] dry_run: Specifies whether to check the validity of the request without actually making the request. :param pulumi.Input[str] nat_gateway_id: The ID of the Virtual Private Cloud (VPC) NAT gateway for which you want to create the NAT IP address. :param pulumi.Input[str] nat_ip: The NAT IP address that you want to create. If you do not specify an IP address, the system selects a random IP address from the specified CIDR block. :param pulumi.Input[str] nat_ip_cidr: NAT IP ADDRESS of the address segment. :param pulumi.Input[str] nat_ip_cidr_id: The ID of the CIDR block to which the NAT IP address belongs. :param pulumi.Input[str] nat_ip_description: NAT IP ADDRESS description of information. Length is from `2` to `256` characters, must start with a letter or the Chinese at the beginning, but not at the` http://` Or `https://` at the beginning. :param pulumi.Input[str] nat_ip_name: NAT IP ADDRESS the name of the root directory. Length is from `2` to `128` characters, must start with a letter or the Chinese at the beginning can contain numbers, half a period (.), underscore (_) and dash (-). But do not start with `http://` or `https://` at the beginning. :param pulumi.Input[str] status: The status of the NAT IP address. 
Valid values: `Available`, `Deleting`, `Creating` and `Deleted`. """ if dry_run is not None: pulumi.set(__self__, "dry_run", dry_run) if nat_gateway_id is not None: pulumi.set(__self__, "nat_gateway_id", nat_gateway_id) if nat_ip is not None: pulumi.set(__self__, "nat_ip", nat_ip) if nat_ip_cidr is not None: pulumi.set(__self__, "nat_ip_cidr", nat_ip_cidr) if nat_ip_cidr_id is not None: pulumi.set(__self__, "nat_ip_cidr_id", nat_ip_cidr_id) if nat_ip_description is not None: pulumi.set(__self__, "nat_ip_description", nat_ip_description) if nat_ip_id is not None: pulumi.set(__self__, "nat_ip_id", nat_ip_id) if nat_ip_name is not None: pulumi.set(__self__, "nat_ip_name", nat_ip_name) if status is not None: pulumi.set(__self__, "status", status) @property @pulumi.getter(name="dryRun") def dry_run(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether to check the validity of the request without actually making the request. """ return pulumi.get(self, "dry_run") @dry_run.setter def dry_run(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dry_run", value) @property @pulumi.getter(name="natGatewayId") def nat_gateway_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the Virtual Private Cloud (VPC) NAT gateway for which you want to create the NAT IP address. """ return pulumi.get(self, "nat_gateway_id") @nat_gateway_id.setter def nat_gateway_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_gateway_id", value) @property @pulumi.getter(name="natIp") def nat_ip(self) -> Optional[pulumi.Input[str]]: """ The NAT IP address that you want to create. If you do not specify an IP address, the system selects a random IP address from the specified CIDR block. """ return pulumi.get(self, "nat_ip") @nat_ip.setter def nat_ip(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip", value) @property @pulumi.getter(name="natIpCidr") def nat_ip_cidr(self) -> Optional[pulumi.Input[str]]: """ NAT IP ADDRESS of the address segment. 
""" return pulumi.get(self, "nat_ip_cidr") @nat_ip_cidr.setter def nat_ip_cidr(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_cidr", value) @property @pulumi.getter(name="natIpCidrId") def nat_ip_cidr_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the CIDR block to which the NAT IP address belongs. """ return pulumi.get(self, "nat_ip_cidr_id") @nat_ip_cidr_id.setter def nat_ip_cidr_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_cidr_id", value) @property @pulumi.getter(name="natIpDescription") def nat_ip_description(self) -> Optional[pulumi.Input[str]]: """ NAT IP ADDRESS description of information. Length is from `2` to `256` characters, must start with a letter or the Chinese at the beginning, but not at the` http://` Or `https://` at the beginning. """ return pulumi.get(self, "nat_ip_description") @nat_ip_description.setter def nat_ip_description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_description", value) @property @pulumi.getter(name="natIpId") def nat_ip_id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "nat_ip_id") @nat_ip_id.setter def nat_ip_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_id", value) @property @pulumi.getter(name="natIpName") def nat_ip_name(self) -> Optional[pulumi.Input[str]]: """ NAT IP ADDRESS the name of the root directory. Length is from `2` to `128` characters, must start with a letter or the Chinese at the beginning can contain numbers, half a period (.), underscore (_) and dash (-). But do not start with `http://` or `https://` at the beginning. """ return pulumi.get(self, "nat_ip_name") @nat_ip_name.setter def nat_ip_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip_name", value) @property @pulumi.getter def status(self) ->
<gh_stars>1-10 ''' Module used for feature extraction of a corpus. ''' import time import os import collections import argparse import spacy import numpy as np from gensim.models import KeyedVectors from xml.etree import cElementTree as ET import pandas as pd from sklearn.linear_model import LinearRegression from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.svm import SVR from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import BaggingRegressor from sklearn.ensemble import AdaBoostRegressor from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression from sklearn.feature_selection import SelectPercentile from sklearn.feature_selection import SelectFromModel from sklearn.feature_selection import RFE from sklearn.feature_selection import RFECV from sklearn.ensemble import VotingRegressor from sklearn.model_selection import GridSearchCV from joblib import dump, load from NLPyPort.FullPipeline import new_full_pipe from ASAPPpy import ROOT_PATH from .scripts.xml_reader import read_xml from .scripts.xml_reader import read_xml_no_attributes from .models.word2vec.word2vec import word2vec_model from .models.fastText.fasttext import fasttext_model from .models.ontoPT.ptlkb import ptlkb_model from .load_embeddings import word_embeddings_model from .scripts.tools import preprocessing, compute_tfidf_matrix, read_corpus, write_features_to_csv from .feature_engineering.lexical_features import create_word_ngrams, create_multiple_word_ngrams, create_character_ngrams, create_multiple_character_ngrams, compute_jaccard, compute_dice, compute_overlap, NG from .feature_engineering.syntactic_features import compute_pos, dependency_parsing from .feature_engineering.semantic_features import compute_ner, compute_semantic_relations def build_sentences_from_tokens(tokens): """ Function used to rebuild the 
sentences from the tokens returned by the pipeline """ sentences = [] tmp_sentence = [] for elem in tokens: if elem == "EOS": tmp_sentence = ' '.join(tmp_sentence) sentences.append(tmp_sentence) tmp_sentence = [] else: tmp_sentence.append(elem) return sentences def extract_features(run_pipeline, corpus, preprocessed_corpus, word2vec_mdl=None, fasttext_mdl=None, ptlkb64_mdl=None, glove300_mdl=None, numberbatch_mdl=None, f_selection=None): """ Function used to extract the features """ # run NLPyPort pipeline before extracting the features if run_pipeline == 1: # if system_mode == 0: # corpus_path = os.path.join('datasets', 'FAQ_todas_variantes_texto_clean.txt') # elif system_mode == 1: # corpus_path = os.path.join('NLPyPort', 'SampleInput', 'train_corpus_ptpt_ptbr.txt') # elif system_mode == 2: # corpus_path = os.path.join('NLPyPort', 'SampleInput', 'train_test_corpus_ptpt_ptbr.txt') # elif system_mode == 3: # corpus_path = "tmp_file.txt" # elif system_mode == 4: # corpus_path = os.path.join('NLPyPort', 'SampleInput', 'complete_assin_training_corpus.txt') # elif system_mode == 5: # corpus_path = os.path.join('NLPyPort', 'SampleInput', 'complete_assin_training_assin1_testing_corpus.txt') start_time = time.time() print("Started running the pipeline") pipeline_output = new_full_pipe(corpus, options={"string_or_array":True}) # there's still a bug in the pipeline that makes it output two EOS tokens at the end of each run, reason why we read the output to the penultimate token. 
tags = build_sentences_from_tokens(pipeline_output.pos_tags) lemmas = build_sentences_from_tokens(pipeline_output.lemas) entities = build_sentences_from_tokens(pipeline_output.entities) print("Finished running the pipeline successfully") print("--- %s seconds ---" %(time.time() - start_time)) print('\a') features = [] # create word ngrams of different sizes if (f_selection is None) or (1 in f_selection[0:9:3]): word_ngrams_1 = create_word_ngrams(preprocessed_corpus, 1) if (f_selection is None) or (1 in f_selection[1:9:3]): word_ngrams_2 = create_word_ngrams(preprocessed_corpus, 2) if (f_selection is None) or (1 in f_selection[2:9:3]): word_ngrams_3 = create_word_ngrams(preprocessed_corpus, 3) # compute distance coefficients for these ngrams if (f_selection is None) or f_selection[0]: wn_jaccard_1 = compute_jaccard(word_ngrams_1) if (f_selection is None) or f_selection[1]: wn_jaccard_2 = compute_jaccard(word_ngrams_2) if (f_selection is None) or f_selection[2]: wn_jaccard_3 = compute_jaccard(word_ngrams_3) if (f_selection is None) or f_selection[3]: wn_dice_1 = compute_dice(word_ngrams_1) if (f_selection is None) or f_selection[4]: wn_dice_2 = compute_dice(word_ngrams_2) if (f_selection is None) or f_selection[5]: wn_dice_3 = compute_dice(word_ngrams_3) if (f_selection is None) or f_selection[6]: wn_overlap_1 = compute_overlap(word_ngrams_1) if (f_selection is None) or f_selection[7]: wn_overlap_2 = compute_overlap(word_ngrams_2) if (f_selection is None) or f_selection[8]: wn_overlap_3 = compute_overlap(word_ngrams_3) # create character ngrams of different sizes if (f_selection is None) or (1 in f_selection[9:18:3]): character_ngrams_2 = create_character_ngrams(preprocessed_corpus, 2) if (f_selection is None) or (1 in f_selection[10:18:3]): character_ngrams_3 = create_character_ngrams(preprocessed_corpus, 3) if (f_selection is None) or (1 in f_selection[11:18:3]): character_ngrams_4 = create_character_ngrams(preprocessed_corpus, 4) # compute distance coefficients 
for these ngrams if (f_selection is None) or f_selection[9]: cn_jaccard_2 = compute_jaccard(character_ngrams_2) if (f_selection is None) or f_selection[10]: cn_jaccard_3 = compute_jaccard(character_ngrams_3) if (f_selection is None) or f_selection[11]: cn_jaccard_4 = compute_jaccard(character_ngrams_4) if (f_selection is None) or f_selection[12]: cn_dice_2 = compute_dice(character_ngrams_2) if (f_selection is None) or f_selection[13]: cn_dice_3 = compute_dice(character_ngrams_3) if (f_selection is None) or f_selection[14]: cn_dice_4 = compute_dice(character_ngrams_4) if (f_selection is None) or f_selection[15]: cn_overlap_2 = compute_overlap(character_ngrams_2) if (f_selection is None) or f_selection[16]: cn_overlap_3 = compute_overlap(character_ngrams_3) if (f_selection is None) or f_selection[17]: cn_overlap_4 = compute_overlap(character_ngrams_4) if word2vec_mdl: if (f_selection is None) or f_selection[18]: word2vec = word2vec_model(word2vec_mdl, corpus, 0, 1, 0) if (f_selection is None) or f_selection[19]: word2vec_tfidf = word2vec_model(word2vec_mdl, corpus, 1, 1, 0) if fasttext_mdl: if (f_selection is None) or f_selection[20]: fasttext = fasttext_model(fasttext_mdl, corpus, 0, 1, 0) if (f_selection is None) or f_selection[21]: fasttext_tfidf = fasttext_model(fasttext_mdl, corpus, 1, 1, 0) if ptlkb64_mdl: if (f_selection is None) or f_selection[22]: # if run_pipeline == 0: # ptlkb_64 = ptlkb.word_embeddings_model(run_pipeline, system_mode, ptlkb64_mdl, 0, 1, 0) # else: ptlkb_64 = ptlkb_model(ptlkb64_mdl, 0, 1, 0, lemmas) if (f_selection is None) or f_selection[23]: # if run_pipeline == 0: # ptlkb_64_tfidf = ptlkb.word_embeddings_model(run_pipeline, system_mode, ptlkb64_mdl, 1, 1, 0) # else: ptlkb_64_tfidf = ptlkb_model(ptlkb64_mdl, 1, 1, 0, lemmas) if glove300_mdl: if (f_selection is None) or f_selection[24]: glove_300 = word_embeddings_model(glove300_mdl, corpus, 0, 1, 0) if (f_selection is None) or f_selection[25]: glove_300_tfidf = 
word_embeddings_model(glove300_mdl, corpus, 1, 1, 0) # compute tfidf matrix - padding was applied to vectors of different sizes by adding zeros to the smaller vector of the pair if (f_selection is None) or f_selection[26]: tfidf_corpus = preprocessing(corpus, 0, 0, 0, 1) tfidf_matrix = compute_tfidf_matrix(tfidf_corpus, 0, 0, 1) # compute semantic relations coefficients if (f_selection is None) or (1 in f_selection[27:31]): # relations_file_path = os.path.join('semantic_relations', 'triplos_10recs', 'triplos_todos_10recs_n.txt') # if run_pipeline == 0: # semantic_relations = compute_semantic_relations(run_pipeline, system_mode, relations_file_path, 3) # else: semantic_relations = compute_semantic_relations(lemmas) # compute POS tags if (f_selection is None) or (1 in f_selection[31:50]): # if run_pipeline == 0: # pos_tags = compute_pos(run_pipeline, system_mode) # else: pos_tags = compute_pos(tags) # compute NERs if (f_selection is None) or (1 in f_selection[50:61]): # if run_pipeline == 0: # ners = compute_ner(run_pipeline, system_mode) # else: ners = compute_ner(entities) # compute Syntactic Dependency parsing if (f_selection is None) or f_selection[61]: dependencies = dependency_parsing(corpus) # create multiple word ngrams of different sizes if (f_selection is None) or f_selection[62]: word_ngrams_1_2 = create_multiple_word_ngrams(preprocessed_corpus, 1, 2) # compute the cosine similarity between the multiple word ngrams converted sentences if (f_selection is None) or f_selection[62]: wn_cosine_1_2 = NG(word_ngrams_1_2) # create multiple character ngrams of different sizes if (f_selection is None) or f_selection[63]: character_ngrams_1_2_3 = create_multiple_character_ngrams(preprocessed_corpus, 1, 2, 3) # compute the cosine similarity between the multiple character ngrams converted sentences if (f_selection is None) or f_selection[63]: cn_cosine_1_2_3 = NG(character_ngrams_1_2_3) if numberbatch_mdl: if (f_selection is None) or f_selection[64]: numberbatch = 
word_embeddings_model(numberbatch_mdl, corpus, 0, 1, 0) if (f_selection is None) or f_selection[65]: numberbatch_tfidf = word_embeddings_model(numberbatch_mdl, corpus, 1, 1, 0) for pair in range(len(preprocessed_corpus)): if f_selection is not None: features_pair = [] if f_selection[0]: features_pair.append(wn_jaccard_1['jaccard'][pair]) if f_selection[1]: features_pair.append(wn_jaccard_2['jaccard'][pair]) if f_selection[2]: features_pair.append(wn_jaccard_3['jaccard'][pair]) if f_selection[3]: features_pair.append(wn_dice_1['dice'][pair]) if f_selection[4]: features_pair.append(wn_dice_2['dice'][pair]) if f_selection[5]: features_pair.append(wn_dice_3['dice'][pair]) if f_selection[6]: features_pair.append(wn_overlap_1['overlap'][pair]) if f_selection[7]: features_pair.append(wn_overlap_2['overlap'][pair]) if f_selection[8]: features_pair.append(wn_overlap_3['overlap'][pair]) if f_selection[9]: features_pair.append(cn_jaccard_2['jaccard'][pair]) if f_selection[10]: features_pair.append(cn_jaccard_3['jaccard'][pair]) if f_selection[11]: features_pair.append(cn_jaccard_4['jaccard'][pair]) if f_selection[12]: features_pair.append(cn_dice_2['dice'][pair]) if f_selection[13]: features_pair.append(cn_dice_3['dice'][pair]) if f_selection[14]: features_pair.append(cn_dice_4['dice'][pair]) if f_selection[15]: features_pair.append(cn_overlap_2['overlap'][pair]) if f_selection[16]: features_pair.append(cn_overlap_3['overlap'][pair]) if f_selection[17]: features_pair.append(cn_overlap_4['overlap'][pair]) if f_selection[18]: features_pair.append(word2vec[pair]) if f_selection[19]: features_pair.append(word2vec_tfidf[pair]) if f_selection[20]: features_pair.append(fasttext[pair]) if f_selection[21]: features_pair.append(fasttext_tfidf[pair]) if f_selection[22]: features_pair.append(ptlkb_64[pair]) if f_selection[23]: features_pair.append(ptlkb_64_tfidf[pair]) if f_selection[24]: features_pair.append(glove_300[pair]) if f_selection[25]: 
features_pair.append(glove_300_tfidf[pair]) if f_selection[26]: features_pair.append(tfidf_matrix[pair]) if f_selection[27]: features_pair.append(semantic_relations['antonyms'][pair]) if f_selection[28]: features_pair.append(semantic_relations['synonyms'][pair]) if f_selection[29]: features_pair.append(semantic_relations['hyperonyms'][pair]) if f_selection[30]: features_pair.append(semantic_relations['other'][pair]) if f_selection[31] and ('n' in pos_tags.columns): features_pair.append(pos_tags['n'][pair]) if f_selection[32] and ('prop' in pos_tags.columns): features_pair.append(pos_tags['prop'][pair]) if f_selection[33] and ('adj' in pos_tags.columns): features_pair.append(pos_tags['adj'][pair]) if f_selection[34] and ('n-adj' in pos_tags.columns): features_pair.append(pos_tags['n-adj'][pair]) if f_selection[35] and ('v-fin' in pos_tags.columns): features_pair.append(pos_tags['v-fin'][pair]) if f_selection[36] and ('v-inf' in pos_tags.columns): features_pair.append(pos_tags['v-inf'][pair]) if f_selection[37] and ('v-pcp' in pos_tags.columns): features_pair.append(pos_tags['v-pcp'][pair]) if f_selection[38] and ('v-ger' in pos_tags.columns): features_pair.append(pos_tags['v-ger'][pair]) if f_selection[39] and ('art' in pos_tags.columns): features_pair.append(pos_tags['art'][pair]) if f_selection[40] and ('pron-pers' in pos_tags.columns): features_pair.append(pos_tags['pron-pers'][pair]) if f_selection[41] and ('pron-det' in pos_tags.columns): features_pair.append(pos_tags['pron-det'][pair]) if f_selection[42] and ('pron-indp' in pos_tags.columns): features_pair.append(pos_tags['pron-indp'][pair]) if f_selection[43] and ('adv' in pos_tags.columns): features_pair.append(pos_tags['adv'][pair]) if f_selection[44] and ('num' in pos_tags.columns): features_pair.append(pos_tags['num'][pair]) if f_selection[45] and ('prp' in pos_tags.columns): features_pair.append(pos_tags['prp'][pair]) if f_selection[46] and ('intj' in pos_tags.columns): 
features_pair.append(pos_tags['intj'][pair]) if f_selection[47] and ('conj-s' in pos_tags.columns): features_pair.append(pos_tags['conj-s'][pair]) if f_selection[48] and ('conj-c' in pos_tags.columns): features_pair.append(pos_tags['conj-c'][pair]) if f_selection[49] and ('punc' in pos_tags.columns): features_pair.append(pos_tags['punc'][pair]) if f_selection[50] and ('all_ners' in ners.columns): features_pair.append(ners['all_ners'][pair]) if f_selection[51] and ('B-ABSTRACCAO' in ners.columns): features_pair.append(ners['B-ABSTRACCAO'][pair]) if f_selection[52] and ('B-ACONTECIMENTO' in ners.columns): features_pair.append(ners['B-ACONTECIMENTO'][pair]) if f_selection[53] and ('B-COISA' in ners.columns): features_pair.append(ners['B-COISA'][pair]) if f_selection[54] and ('B-LOCAL' in ners.columns): features_pair.append(ners['B-LOCAL'][pair]) if f_selection[55] and ('B-OBRA' in ners.columns): features_pair.append(ners['B-OBRA'][pair]) if f_selection[56] and ('B-ORGANIZACAO' in ners.columns): features_pair.append(ners['B-ORGANIZACAO'][pair]) if f_selection[57] and ('B-OUTRO' in ners.columns): features_pair.append(ners['B-OUTRO'][pair]) if f_selection[58] and ('B-PESSOA' in ners.columns): features_pair.append(ners['B-PESSOA'][pair]) if f_selection[59] and ('B-TEMPO' in ners.columns): features_pair.append(ners['B-TEMPO'][pair]) if f_selection[60] and ('B-VALOR' in ners.columns): features_pair.append(ners['B-VALOR'][pair]) if f_selection[61]: features_pair.append(dependencies['dependency_parsing_jc'][pair]) if f_selection[62]: features_pair.append(wn_cosine_1_2['NG'][pair]) if f_selection[63]: features_pair.append(cn_cosine_1_2_3['NG'][pair]) if f_selection[64]: features_pair.append(numberbatch[pair]) if f_selection[65]: features_pair.append(numberbatch_tfidf[pair]) tuple_features_pair = tuple(features_pair) features.append(tuple_features_pair) else: flag = 1 features_pair = [] used_features = [False] * 66 if flag == 1: used_features[0] = True 
features_pair.append(wn_jaccard_1['jaccard'][pair]) if flag == 1: used_features[1] = True features_pair.append(wn_jaccard_2['jaccard'][pair]) if flag == 1: used_features[2] = True features_pair.append(wn_jaccard_3['jaccard'][pair]) if flag == 1: used_features[3] = True features_pair.append(wn_dice_1['dice'][pair]) if flag == 1: used_features[4] = True features_pair.append(wn_dice_2['dice'][pair]) if flag == 1: used_features[5] = True features_pair.append(wn_dice_3['dice'][pair]) if flag == 1: used_features[6] = True features_pair.append(wn_overlap_1['overlap'][pair]) if flag == 1: used_features[7] = True features_pair.append(wn_overlap_2['overlap'][pair]) if flag == 1: used_features[8] = True features_pair.append(wn_overlap_3['overlap'][pair]) if flag == 1: used_features[9] = True features_pair.append(cn_jaccard_2['jaccard'][pair]) if flag == 1: used_features[10] = True features_pair.append(cn_jaccard_3['jaccard'][pair]) if flag == 1: used_features[11] = True features_pair.append(cn_jaccard_4['jaccard'][pair]) if flag == 1: used_features[12] = True features_pair.append(cn_dice_2['dice'][pair]) if flag == 1: used_features[13] = True features_pair.append(cn_dice_3['dice'][pair]) if flag == 1: used_features[14] = True features_pair.append(cn_dice_4['dice'][pair]) if flag == 1: used_features[15] = True features_pair.append(cn_overlap_2['overlap'][pair]) if flag == 1: used_features[16] = True features_pair.append(cn_overlap_3['overlap'][pair]) if flag == 1: used_features[17] = True features_pair.append(cn_overlap_4['overlap'][pair]) if flag == 0: used_features[18] = True features_pair.append(word2vec[pair]) if flag == 0: used_features[19] = True features_pair.append(word2vec_tfidf[pair]) if flag == 1: used_features[20] = True features_pair.append(fasttext[pair]) if flag == 1: used_features[21] = True features_pair.append(fasttext_tfidf[pair]) if flag == 1: used_features[22] = True features_pair.append(ptlkb_64[pair]) if flag == 1: used_features[23] = True 
features_pair.append(ptlkb_64_tfidf[pair]) if flag == 1: used_features[24] = True features_pair.append(glove_300[pair]) if flag == 1: used_features[25] = True features_pair.append(glove_300_tfidf[pair]) if flag == 0: used_features[26] = True features_pair.append(tfidf_matrix[pair]) if flag == 0: used_features[27] = True features_pair.append(semantic_relations['antonyms'][pair]) if flag == 1: used_features[28] = True features_pair.append(semantic_relations['synonyms'][pair]) if flag == 1: used_features[29] = True features_pair.append(semantic_relations['hyperonyms'][pair]) if flag == 0: used_features[30] = True features_pair.append(semantic_relations['other'][pair]) if flag == 1 and ('n' in pos_tags.columns): used_features[31] = True features_pair.append(pos_tags['n'][pair]) else: features_pair.append(0) if flag == 1 and ('prop' in pos_tags.columns): used_features[32] = True features_pair.append(pos_tags['prop'][pair]) else: features_pair.append(0) if flag == 1 and ('adj' in pos_tags.columns): used_features[33] = True features_pair.append(pos_tags['adj'][pair]) else: features_pair.append(0) if flag == 0 and ('n-adj' in pos_tags.columns): used_features[34] = True features_pair.append(pos_tags['n-adj'][pair]) else: features_pair.append(0) if flag == 1 and ('v-fin' in pos_tags.columns): used_features[35] = True features_pair.append(pos_tags['v-fin'][pair]) else: features_pair.append(0) if flag == 1 and ('v-inf' in pos_tags.columns): used_features[36] = True features_pair.append(pos_tags['v-inf'][pair]) else: features_pair.append(0) if flag == 1 and ('v-pcp' in pos_tags.columns): used_features[37] = True features_pair.append(pos_tags['v-pcp'][pair]) else: features_pair.append(0) if flag == 1 and ('v-ger' in pos_tags.columns): used_features[38] = True features_pair.append(pos_tags['v-ger'][pair]) else: features_pair.append(0) if flag == 1 and ('art' in pos_tags.columns): used_features[39] = True features_pair.append(pos_tags['art'][pair]) else: features_pair.append(0) if 
flag == 1 and ('pron-pers' in pos_tags.columns): used_features[40] = True features_pair.append(pos_tags['pron-pers'][pair]) else: features_pair.append(0) if
## dea_datahandling.py ''' Description: This file contains a set of python functions for handling Digital Earth Australia data. License: The code in this notebook is licensed under the Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth Australia data is licensed under the Creative Commons by Attribution 4.0 license (https://creativecommons.org/licenses/by/4.0/). Contact: If you need assistance, please post a question on the Open Data Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions here: https://gis.stackexchange.com/questions/tagged/open-data-cube). If you would like to report an issue with this script, you can file one on Github (https://github.com/GeoscienceAustralia/dea-notebooks/issues/new). Functions included: load_ard array_to_geotiff mostcommon_utm download_unzip wofs_fuser dilate Last modified: October 2019 ''' # Import required packages import os import gdal import requests import zipfile import warnings import numpy as np import xarray as xr from collections import Counter from datacube.storage import masking from scipy.ndimage import binary_dilation def load_ard(dc, products=None, min_gooddata=0.0, fmask_gooddata=[1, 4, 5], mask_pixel_quality=True, mask_invalid_data=True, ls7_slc_off=True, product_metadata=False, dask_chunks={'time': 1}, lazy_load=False, **dcload_kwargs): ''' Loads Landsat Collection 3 or Sentinel 2 Definitive and Near Real Time data for multiple sensors (i.e. ls5t, ls7e and ls8c for Landsat; s2a and s2b for Sentinel 2), and returns a single masked xarray dataset containing only observations that contain greater than a given proportion of good quality pixels. This can be used to extract clean time series of observations that are not affected by cloud, for example as an input to the `animated_timeseries` function from `dea_plotting`. 
The proportion of good quality pixels is calculated by summing the pixels flagged as good quality in `fmask`. By default non-cloudy or shadowed land, snow and water pixels are treated as good quality, but this can be customised using the `fmask_gooddata` parameter. MEMORY ISSUES: For large data extractions, it can be advisable to set `mask_pixel_quality=False`. The masking step coerces all numeric values to float32 when NaN values are inserted into the array, potentially causing your data to use twice the memory. Be aware that the resulting arrays will contain invalid values which may affect future analyses. Last modified: September 2019 Parameters ---------- dc : datacube Datacube object The Datacube to connect to, i.e. `dc = datacube.Datacube()`. This allows you to also use development datacubes if required. products : list A list of product names to load data from. Valid options are ['ga_ls5t_ard_3', 'ga_ls7e_ard_3', 'ga_ls8c_ard_3'] for Landsat, ['s2a_ard_granule', 's2b_ard_granule'] for Sentinel 2 Definitive, and ['s2a_nrt_granule', 's2b_nrt_granule'] for Sentinel 2 Near Real Time. min_gooddata : float, optional An optional float giving the minimum percentage of good quality pixels required for a satellite observation to be loaded. Defaults to 0.0 which will return all observations regardless of pixel quality (set to e.g. 0.99 to return only observations with more than 99% good quality pixels). fmask_gooddata : list, optional An optional list of fmask values to treat as good quality observations in the above `min_gooddata` calculation. The default is `[1, 4, 5]` which will return non-cloudy or shadowed land, snow and water pixels. Choose from: `{'0': 'nodata', '1': 'valid', '2': 'cloud', '3': 'shadow', '4': 'snow', '5': 'water'}`. mask_pixel_quality : bool, optional An optional boolean indicating whether to apply the good data mask to all observations that were not filtered out for having less good quality pixels than `min_gooddata`. E.g. 
if `min_gooddata=0.99`, the filtered observations may still contain up to 1% poor quality pixels. The default of False simply returns the resulting observations without masking out these pixels; True masks them out and sets them to NaN using the good data mask. This will convert numeric values to float32 which can cause memory issues, set to False to prevent this. mask_invalid_data : bool, optional An optional boolean indicating whether invalid -999 nodata values should be replaced with NaN. These invalid values can be caused by missing data along the edges of scenes, or terrain effects (for NBAR-T). Setting `mask_invalid_data=True` will convert all numeric values to float32 when -999 values are replaced with NaN which can cause memory issues; set to False to prevent this. Defaults to True. ls7_slc_off : bool, optional An optional boolean indicating whether to include data from after the Landsat 7 SLC failure (i.e. SLC-off). Defaults to True, which keeps all Landsat 7 observations > May 31 2003. product_metadata : bool, optional An optional boolean indicating whether to return the dataset with a `product` variable that gives the name of the product that each observation in the time series came from (e.g. 'ga_ls5t_ard_3'). Defaults to False. dask_chunks : dict, optional An optional dictionary containing the coords and sizes you wish to create dask chunks over. Usually used in combination with `lazy_load=True` (see below). For example: `dask_chunks = {'x': 500, 'y': 500}` lazy_load : boolean, optional Setting this variable to True will delay the computation of the function until you explicitly run `ds.compute()`. If used in conjuction with `dask.distributed.Client()` this will allow for automatic parallel computation. **dcload_kwargs : A set of keyword arguments to `dc.load` that define the spatiotemporal query used to extract data. 
This can include `x`, `y`, `time`, `resolution`, `resampling`, `group_by`, `crs` etc, and can either be listed directly in the `load_ard` call (e.g. `x=(150.0, 151.0)`), or by passing in a query kwarg (e.g. `**query`). For a full list of possible options, see: https://datacube-core.readthedocs.io/en/latest/dev/api/generate/datacube.Datacube.load.html Returns ------- combined_ds : xarray Dataset An xarray dataset containing only satellite observations that contains greater than `min_gooddata` proportion of good quality pixels. ''' # Due to possible bug in xarray 0.13.0, define temporary function # which converts dtypes in a way that preserves attributes def astype_attrs(da, dtype=np.float32): ''' Loop through all data variables in the dataset, record attributes, convert to float32, then reassign attributes. If the data variable cannot be converted to float32 (e.g. for a non-numeric dtype like strings), skip and return the variable unchanged. ''' try: da_attr = da.attrs da = da.astype(dtype) da = da.assign_attrs(**da_attr) return da except ValueError: return da # Verify that products were provided if not products: raise ValueError("Please provide a list of product names " "to load data from. 
Valid options are: \n" "['ga_ls5t_ard_3', 'ga_ls7e_ard_3', 'ga_ls8c_ard_3'] " "for Landsat, ['s2a_ard_granule', " "'s2b_ard_granule'] \nfor Sentinel 2 Definitive, or " "['s2a_nrt_granule', 's2b_nrt_granule'] for " "Sentinel 2 Near Real Time") # If `measurements` are specified but do not include fmask, add it if (('measurements' in dcload_kwargs) and ('fmask' not in dcload_kwargs['measurements'])): dcload_kwargs['measurements'].append('fmask') # Create a list to hold data for each product product_data = [] # Iterate through each requested product for product in products: try: # Load data including fmask band print(f'Loading {product} data') try: ds = dc.load(product=f'{product}', dask_chunks=dask_chunks, **dcload_kwargs) except KeyError as e: raise ValueError(f'Band {e} does not exist in this product. ' f'Verify all requested `measurements` exist ' f'in {products}') # Keep a record of the original number of observations total_obs = len(ds.time) # Remove Landsat 7 SLC-off observations if ls7_slc_off=False if not ls7_slc_off and product == 'ga_ls7e_ard_3': print(' Ignoring SLC-off observations for ls7') ds = ds.sel(time=ds.time < np.datetime64('2003-05-30')) # If no measurements are specified, `fmask` is given a # different name. If necessary, rename it: if 'oa_fmask' in ds: ds = ds.rename({'oa_fmask': 'fmask'}) # Identify all pixels not affected by cloud/shadow/invalid good_quality = ds.fmask.isin(fmask_gooddata) # The good data percentage calculation has to load in all `fmask` # data, which can be slow. If the user has chosen no filtering # by using the default `min_gooddata = 0`, we can skip this
from itertools import chain import time import os import math from tornado_sqlalchemy import as_future from tornado.gen import multi from PIL import Image, ImageDraw, ImageColor, ImageFont from models import DotaProPlayer, DotaHeroes, DotaItem, DotaProTeam from image_generation.helpers import draw_text_outlined_center_align, draw_text_right_align, draw_image, \ draw_image_centered class PostGameMixin: async def generate_post_game(self, game_id): generated_path = os.path.join(self.generated_root, "post_game-" + str(game_id) + ".png") if os.path.exists(generated_path): os.remove(generated_path) # Generate image composition = Image.open(os.path.join(self.assets_root, 'background3.png')).convert('RGBA') image_draw = ImageDraw.Draw(composition) # Prepare fonts rift_player_nickname = ImageFont.truetype( os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold_italic.otf'), 46) noto_cjk_player_nickname = ImageFont.truetype( os.path.join(self.assets_root, 'fonts', 'noto', 'noto_sans_cjk_bold.otf'), 38) rift_player_name = ImageFont.truetype( os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_regular.otf'), 26) rift_kda = ImageFont.truetype( os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold.otf'), 32) rift_dmg = ImageFont.truetype( os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_regular.otf'), 32) rift_team = ImageFont.truetype( os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold.otf'), 50) # Get data game_json = self.download_opendata_if_necessary(game_id) if game_json is None or game_json['version'] is None: image_draw.text([100, 100], str(time.time()), font=rift_player_nickname, fill=self.colors['light_red']) image_draw.text([100, 200], "ERROR WITH OPENDOTA", font=rift_player_nickname, fill=self.colors['light_red']) composition.save(generated_path) return hero_x = 350 hero_y_side_padding = 30 hero_height = 90 hero_width = int(256 * hero_height / 144) hero_y_padding = 10 
item_padding = 4 item_height = int((hero_height - item_padding) / 2) item_width = int(88 * item_height / 64) player_name_x_padding = -40 player_name_y_padding = 0 player_nickname_y_padding = 50 kda_padding_x = 5 hero_y = {0: hero_y_side_padding, 1: hero_y_side_padding + hero_height + hero_y_padding, 2: hero_y_side_padding + 2 * (hero_height + hero_y_padding), 3: hero_y_side_padding + 3 * (hero_height + hero_y_padding), 4: hero_y_side_padding + 4 * (hero_height + hero_y_padding), 128: 1080 - hero_y_side_padding - hero_height * 5 - hero_y_padding * 4, 129: 1080 - hero_y_side_padding - hero_height * 4 - hero_y_padding * 3, 130: 1080 - hero_y_side_padding - hero_height * 3 - hero_y_padding * 2, 131: 1080 - hero_y_side_padding - hero_height * 2 - hero_y_padding, 132: 1080 - hero_y_side_padding - hero_height} hero_color = {0: self.colors['hero_blue'], 1: self.colors['hero_teal'], 2: self.colors['hero_purple'], 3: self.colors['hero_yellow'], 4: self.colors['hero_orange'], 128: self.colors['hero_pink'], 129: self.colors['hero_grey'], 130: self.colors['hero_aqua'], 131: self.colors['hero_green'], 132: self.colors['hero_brown']} hero_color_width = 10 # Get database data heroes, items, players, teams = await multi([as_future(self.session.query(DotaHeroes).all), as_future(self.session.query(DotaItem).all), as_future(self.session.query(DotaProPlayer).all), as_future(self.session.query(DotaProTeam).all)]) # Draw Heroes & Items for player in game_json['players']: hero = next((hero for hero in heroes if hero.id == player['hero_id']), None) if hero is None: short_name = 'error' else: short_name = hero.short_name hero_image = Image.open(os.path.join(self.assets_root, 'dota', 'hero_rectangle', short_name + '.png')) \ .convert('RGBA') draw_image(composition, hero_image, [hero_x, hero_y[player['player_slot']]], [None, hero_height]) # Draw Items for j in range(0, 2): for i in range(0, 3): key = 'item_{0}'.format(j * 3 + i) if player[key] != 0: item = next((item for item in items if 
item.id == player[key]), None) if item is None: short_name = 'error' else: short_name = item.name item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', short_name + '.png') if not os.path.exists(item_path): item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', 'default.png') item_image = Image.open(item_path).convert('RGBA') else: item_image = Image.open(os.path.join(self.assets_root, 'dota', 'item_rectangle', 'empty.png')) \ .convert('RGBA') draw_image(composition, item_image, [hero_x + hero_width + (i + 1) * item_padding + i * item_width, hero_y[player['player_slot']] + j * (item_height + item_padding)], [None, item_height]) # Draw neutral item if player["item_neutral"] != 0: item = next((item for item in items if item.id == player["item_neutral"]), None) if item is None: short_name = 'error' else: short_name = item.name item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', short_name + '.png') if not os.path.exists(item_path): item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', 'default.png') item_image = Image.open(item_path).convert('RGBA') else: item_image = Image.open(os.path.join(self.assets_root, 'dota', 'item_rectangle', 'empty.png')) \ .convert('RGBA') draw_image(composition, item_image, [hero_x + hero_width + 5*item_padding + 3*item_width, hero_y[player['player_slot']] + int((item_height + item_padding)/2)], [None, item_height]) # Draw icons sword_image = Image.open(os.path.join(self.assets_root, 'icons', 'sword.png')).convert('RGBA') sword_image = sword_image.resize([int(item_height / 2), int(item_height / 2)], Image.LANCZOS) in_place_sword = Image.new('RGBA', (composition.size[0], composition.size[1])) in_place_sword.paste(sword_image, box=[hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x), hero_y[player['player_slot']] + item_height + 15], mask=sword_image) composition = Image.alpha_composite(composition, in_place_sword) # Draw kda skull skull_image = 
Image.open(os.path.join(self.assets_root, 'icons', 'skull.png')).convert('RGBA') skull_image = skull_image.resize([int(item_height / 2), int(item_height / 2)], Image.LANCZOS) in_place_skull = Image.new('RGBA', (composition.size[0], composition.size[1])) in_place_skull.paste(skull_image, box=[hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x), hero_y[player['player_slot']] + 12], mask=skull_image) composition = Image.alpha_composite(composition, in_place_skull) # Draw colors image_draw = ImageDraw.Draw(composition) for player in game_json['players']: image_draw.rectangle([hero_x - hero_color_width, hero_y[player['player_slot']], hero_x, hero_y[player['player_slot']] + hero_height - 1], fill=hero_color[player['player_slot']]) # Draw player names & pseudo for player in game_json['players']: pro_player = next((pro_player for pro_player in players if pro_player.account_id == player['account_id']), None) player_name_font = rift_player_nickname if pro_player is None: name = '-' nickname = '-' else: name = pro_player.name nickname = pro_player.nickname if not len(nickname) == len(nickname.encode()): player_name_font = noto_cjk_player_nickname draw_text_right_align(image_draw, [hero_x + player_name_x_padding, hero_y[player['player_slot']] + player_name_y_padding], nickname, player_name_font, fill=self.colors['white']) draw_text_right_align(image_draw, [hero_x + player_name_x_padding, hero_y[player[ 'player_slot']] + player_name_y_padding + player_nickname_y_padding], name, rift_player_name, fill=self.colors['white']) kda = "{0}/{1}/{2}".format(player['kills'], player['deaths'], player['assists']) image_draw.text([hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x) + int( item_height / 2) + 3 * item_padding, hero_y[player['player_slot']]], text=kda, font=rift_kda, fill=self.colors['white']) image_draw.text([hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x) + int( item_height / 2) + 3 * item_padding, 
hero_y[player['player_slot']] + item_height + item_padding], text=str(player['hero_damage']), font=rift_dmg, fill=self.colors['white']) # Draw graph radiant_gold_adv = game_json['radiant_gold_adv'] radiant_xp_adv = game_json['radiant_xp_adv'] graph_start_x = 910 graph_end_x = 1850 graph_y = 400 graph_width = 4 graph_graduation_x = 10 gold_xp_max = 0 for item in chain(radiant_gold_adv, radiant_xp_adv): if abs(item) > gold_xp_max: gold_xp_max = abs(item) gold_xp_max = int((gold_xp_max - gold_xp_max % 1000) / 1000 + 1) duration = math.ceil(game_json['duration'] / 60) graph_x_step = math.floor((graph_end_x - graph_start_x) / duration) graph_y_step = math.floor(graph_y / gold_xp_max) image_draw.line([graph_start_x, 540 - int(graph_width / 2), graph_end_x, 540 - int(graph_width / 2)], fill=self.colors['white'], width=graph_width) image_draw.line( [graph_start_x - int(graph_width / 2), 540 - graph_y, graph_start_x - int(graph_width / 2), 540 + graph_y], fill=self.colors['white'], width=graph_width) i = 5 while i < gold_xp_max: image_draw.line([graph_start_x, 540 + graph_y_step * i, graph_end_x, 540 + graph_y_step * i], fill=self.colors['grey'], width=1) image_draw.line([graph_start_x, 540 - graph_y_step * i, graph_end_x, 540 - graph_y_step * i], fill=self.colors['grey'], width=1) i += 5 i = 5 while i < duration: image_draw.line([graph_start_x + i * graph_x_step, 540 - graph_graduation_x - 2, graph_start_x + i * graph_x_step, 540 + graph_graduation_x - 1], fill=self.colors['white'], width=graph_width) i += 5 for i in range(1, duration): image_draw.line([graph_start_x + (i - 1) * graph_x_step, 540 - int(graph_y_step * (radiant_xp_adv[i - 1] / 1000)), graph_start_x + i * graph_x_step, 540 - int(graph_y_step * (radiant_xp_adv[i] / 1000))], fill=self.colors['blue'], width=6) image_draw.line([graph_start_x + (i - 1) * graph_x_step, 540 - int(graph_y_step * (radiant_gold_adv[i - 1] / 1000)), graph_start_x + i * graph_x_step, 540 - int(graph_y_step * (radiant_gold_adv[i] / 
1000))], fill=self.colors['yellow'], width=6) for objectif in game_json['objectives']: objectif_x = 0 objectif_y = 0 image = 'error' if objectif['type'] in ['CHAT_MESSAGE_COURIER_LOST', 'building_kill', 'CHAT_MESSAGE_ROSHAN_KILL']: objectif_x = graph_start_x + int(graph_x_step * objectif['time'] / 60) if objectif['type'] == 'CHAT_MESSAGE_COURIER_LOST': image = 'chick_kill' if objectif['team'] == 2: objectif_y = 540 - graph_y - 35 else: objectif_y = 540 + graph_y + 35 elif objectif['type'] == 'CHAT_MESSAGE_ROSHAN_KILL': image = 'roshan_kill' if objectif['team'] == 2: objectif_y = 540 - graph_y - 35 else: objectif_y = 540 + graph_y + 35 else: if 'badguys' in objectif['key']: objectif_y = 540 - graph_y - 35 else: objectif_y = 540 + graph_y + 35 if 'tower' in objectif['key']: image = 'tower_kill' elif 'healers' in objectif['key']: image = 'shrine_kill' elif 'melee_rax' in objectif['key']: image = 'rax_kill' if image == 'error': continue image_icon = Image.open(os.path.join(self.assets_root, 'icons', image + '.png')).convert('RGBA') composition = draw_image_centered(composition, image_icon, [objectif_x, objectif_y], [35, 35]) for player in game_json['players']: for item_purchase in player['purchase_log']: if item_purchase['key'] in ['black_king_bar', 'blink', 'sheepstick', 'silver_edge', 'refresher', 'orchid']: if player['player_slot'] > 100: item_y = 540 + graph_y else: item_y = 540 - graph_y item_x = graph_start_x + int(graph_x_step * item_purchase['time'] / 60) image_icon = Image.open( os.path.join(self.assets_root, 'icons', 'item_' + item_purchase['key'] + '.png')).convert( 'RGBA') composition = draw_image_centered(composition, image_icon, [item_x, item_y], [35, 35]) # Draw titles image_draw = ImageDraw.Draw(composition) radiant_team = '?' dire_team = '?' 
radiant_team_info = next((team for team in teams if team.id == game_json['radiant_team_id']), None) if radiant_team_info is not None: radiant_team = radiant_team_info.name dire_team_info = next((team for team in teams if team.id == game_json['dire_team_id']), None) if dire_team_info is not None: dire_team = dire_team_info.name radiant_color = self.colors['ti_green'] dire_color = self.colors['ti_green'] laurels_icon = Image.open(os.path.join(self.assets_root, 'icons', 'laurels.png')).convert('RGBA') if game_json['radiant_win']: laurels_x = [int((graph_start_x + graph_end_x - image_draw.textsize(radiant_team,
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test the Python API and shell binary of the tensorflowjs pip package.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import glob import json import os import shutil import subprocess import sys import tempfile import unittest import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import variables from tensorflow.python.training.tracking import tracking from tensorflow.python.saved_model.save import save import tensorflow_hub as hub import tensorflowjs as tfjs def _createKerasModel(layer_name_prefix, h5_path=None): """Create a Keras model for testing. Args: layer_name_prefix: A prefix string for layer names. This helps avoid clashes in layer names between different test methods. h5_path: Optional string path for a HDF5 (.h5) file to save the model in. Returns: An instance of keras.Model. 
""" input_tensor = keras.layers.Input((3, )) dense1 = keras.layers.Dense( 4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros', name=layer_name_prefix + '1')(input_tensor) output = keras.layers.Dense( 2, use_bias=False, kernel_initializer='ones', name=layer_name_prefix + '2')(dense1) model = keras.models.Model(inputs=[input_tensor], outputs=[output]) if h5_path: model.save(h5_path) return model def _createTensorFlowSavedModelV1(name_scope, save_path): """Create a TensorFlow SavedModel for testing. Args: name_scope: Name scope to create the model under. This helps avoid op and variable name clashes between different test methods. save_path: The directory path in which to save the model. """ graph = tf.Graph() with graph.as_default(): with tf.compat.v1.name_scope(name_scope): x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]]) w = tf.compat.v1.get_variable('w', shape=[2, 2]) y = tf.compat.v1.matmul(x, w) output = tf.compat.v1.nn.softmax(y) init_op = w.initializer # Create a builder. builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_path) with tf.compat.v1.Session() as sess: # Run the initializer on `w`. sess.run(init_op) builder.add_meta_graph_and_variables( sess, [tf.compat.v1.saved_model.tag_constants.SERVING], signature_def_map={ "serving_default": tf.compat.v1.saved_model.signature_def_utils.predict_signature_def( inputs={"x": x}, outputs={"output": output}) }, assets_collection=None) builder.save() def _createTensorFlowSavedModel(name_scope, save_path): """Create a TensorFlow SavedModel for testing. Args: name_scope: Name scope to create the model under. This helps avoid op and variable name clashes between different test methods. save_path: The directory path in which to save the model. """ input_data = constant_op.constant(1., shape=[1]) root = tracking.AutoTrackable() root.v1 = variables.Variable(3.) root.v2 = variables.Variable(2.) 
root.f = def_function.function(lambda x: root.v1 * root.v2 * x) to_save = root.f.get_concrete_function(input_data) save(root, save_path, to_save) def _create_hub_module(save_path): """Create a TensorFlow Hub module for testing. Args: save_path: The directory path in which to save the model. """ # Module function that doubles its input. def double_module_fn(): w = tf.Variable([2.0, 4.0]) x = tf.compat.v1.placeholder(dtype=tf.float32) hub.add_signature(inputs=x, outputs=x*w) graph = tf.Graph() with graph.as_default(): spec = hub.create_module_spec(double_module_fn) m = hub.Module(spec) # Export the module. with tf.compat.v1.Session(graph=graph) as sess: sess.run(tf.compat.v1.global_variables_initializer()) m.export(save_path, sess) class APIAndShellTest(tf.test.TestCase): """Tests for the Python API of the pip package.""" @classmethod def setUpClass(cls): cls.class_tmp_dir = tempfile.mkdtemp() cls.tf_saved_model_dir = os.path.join(cls.class_tmp_dir, 'tf_saved_model') cls.tf_saved_model_v1_dir = os.path.join( cls.class_tmp_dir, 'tf_saved_model_v1') _createTensorFlowSavedModel('a', cls.tf_saved_model_dir) _createTensorFlowSavedModelV1('b', cls.tf_saved_model_v1_dir) cls.tf_hub_module_dir = os.path.join(cls.class_tmp_dir, 'tf_hub_module') _create_hub_module(cls.tf_hub_module_dir) @classmethod def tearDownClass(cls): shutil.rmtree(cls.class_tmp_dir) def setUp(self): # Make sure this file is not being run from the source directory, to # avoid picking up source files. if os.path.isdir( os.path.join(os.path.dirname(__file__), 'tensorflowjs')): self.fail('Do not run this test from the Python source directory. 
' 'This file is intended to be run on pip install.') self._tmp_dir = tempfile.mkdtemp() super(APIAndShellTest, self).setUp() def tearDown(self): if os.path.isdir(self._tmp_dir): shutil.rmtree(self._tmp_dir) super(APIAndShellTest, self).tearDown() def testVersionString(self): self.assertEqual(2, tfjs.__version__.count('.')) def testSaveKerasModel(self): with self.test_session(): # First create a toy keras model. model = _createKerasModel('MergedDense') tfjs.converters.save_keras_model(model, self._tmp_dir) # Briefly check the model topology. with open(os.path.join(self._tmp_dir, 'model.json')) as f: json_content = json.load(f) model_json = json_content['modelTopology'] self.assertIsInstance(model_json['model_config'], dict) self.assertIsInstance(model_json['model_config']['config'], dict) self.assertIn('layers', model_json['model_config']['config']) weights_manifest = json_content['weightsManifest'] self.assertIsInstance(weights_manifest, list) # Briefly check the weights manifest. weight_shapes = dict() weight_dtypes = dict() for manifest_item in weights_manifest: for weight in manifest_item['weights']: weight_name = weight['name'] weight_shapes[weight_name] = weight['shape'] weight_dtypes[weight_name] = weight['dtype'] self.assertEqual( sorted(list(weight_shapes.keys())), sorted([ 'MergedDense1/kernel', 'MergedDense1/bias', 'MergedDense2/kernel' ])) self.assertEqual(weight_shapes['MergedDense1/kernel'], [3, 4]) self.assertEqual(weight_shapes['MergedDense1/bias'], [4]) self.assertEqual(weight_shapes['MergedDense2/kernel'], [4, 2]) self.assertEqual(weight_dtypes['MergedDense1/kernel'], 'float32') self.assertEqual(weight_dtypes['MergedDense1/bias'], 'float32') self.assertEqual(weight_dtypes['MergedDense2/kernel'], 'float32') def testLoadKerasModel(self): # Use separate tf.Graph and tf.compat.v1.Session contexts to prevent name collision. with tf.Graph().as_default(), tf.compat.v1.Session(): # First create a toy keras model. 
model1 = _createKerasModel('MergedDense') tfjs.converters.save_keras_model(model1, self._tmp_dir) model1_weight_values = model1.get_weights() with tf.Graph().as_default(), tf.compat.v1.Session(): # Load the model from saved artifacts. model2 = tfjs.converters.load_keras_model( os.path.join(self._tmp_dir, 'model.json')) # Compare the loaded model with the original one. model2_weight_values = model2.get_weights() self.assertEqual(len(model1_weight_values), len(model2_weight_values)) for model1_weight_value, model2_weight_value in zip( model1_weight_values, model2_weight_values): self.assertAllClose(model1_weight_value, model2_weight_value) # Check the content of the output directory. self.assertTrue(glob.glob(os.path.join(self._tmp_dir, 'group*-*'))) def testInvalidInputFormatRaisesError(self): process = subprocess.Popen( [ 'tensorflowjs_converter', '--input_format', 'nonsensical_format', self._tmp_dir, self._tmp_dir ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, stderr = process.communicate() self.assertGreater(process.returncode, 0) self.assertIn(b'--input_format', tf.compat.as_bytes(stderr)) def testMissingInputPathRaisesError(self): process = subprocess.Popen( [ 'tensorflowjs_converter' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, stderr = process.communicate() self.assertGreater(process.returncode, 0) self.assertIn(b'input_path', tf.compat.as_bytes(stderr)) def testKerasH5ConversionWorksFromCLI(self): with tf.Graph().as_default(), tf.compat.v1.Session(): # First create a toy keras model. os.makedirs(os.path.join(self._tmp_dir, 'keras_h5')) h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5') _createKerasModel('MergedDenseForCLI', h5_path) process = subprocess.Popen([ 'tensorflowjs_converter', '--input_format', 'keras', h5_path, self._tmp_dir ]) process.communicate() self.assertEqual(0, process.returncode) # Briefly check the model topology. 
with open(os.path.join(self._tmp_dir, 'model.json'), 'rt') as f: json_content = json.load(f) model_json = json_content['modelTopology'] self.assertIsInstance(model_json['model_config'], dict) self.assertIsInstance(model_json['model_config']['config'], dict) self.assertIn('layers', model_json['model_config']['config']) weights_manifest = json_content['weightsManifest'] self.assertIsInstance(weights_manifest, list) # Briefly check the weights manifest. weight_shapes = dict() weight_dtypes = dict() for manifest_item in weights_manifest: for weight in manifest_item['weights']: weight_name = weight['name'] weight_shapes[weight_name] = weight['shape'] weight_dtypes[weight_name] = weight['dtype'] self.assertEqual( sorted(list(weight_shapes.keys())), sorted([ 'MergedDenseForCLI1/kernel', 'MergedDenseForCLI1/bias', 'MergedDenseForCLI2/kernel' ])) self.assertEqual(weight_shapes['MergedDenseForCLI1/kernel'], [3, 4]) self.assertEqual(weight_shapes['MergedDenseForCLI1/bias'], [4]) self.assertEqual(weight_shapes['MergedDenseForCLI2/kernel'], [4, 2]) self.assertEqual(weight_dtypes['MergedDenseForCLI1/kernel'], 'float32') self.assertEqual(weight_dtypes['MergedDenseForCLI1/bias'], 'float32') self.assertEqual(weight_dtypes['MergedDenseForCLI2/kernel'], 'float32') # Verify that there is only one weight group due to the default # non-split_weights_by_layer behavior. The model is a small one, which # does not exceed the 4-MB shard size limit. Therefore, there should # be only one weight file. self.assertEqual( 1, len(glob.glob(os.path.join(self._tmp_dir, 'group*')))) def testKerasH5ConversionSplitWeightsByLayerWorksFromCLI(self): with tf.Graph().as_default(), tf.compat.v1.Session(): # First create a toy keras model. 
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5')) h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5') _createKerasModel('MergedDenseForCLI', h5_path) process = subprocess.Popen([ 'tensorflowjs_converter', '--input_format', 'keras', '--split_weights_by_layer', h5_path, self._tmp_dir ]) process.communicate() self.assertEqual(0, process.returncode) # Briefly check the model topology. with open(os.path.join(self._tmp_dir, 'model.json'), 'rt') as f: json_content = json.load(f) model_json = json_content['modelTopology'] self.assertIsInstance(model_json['model_config'], dict) self.assertIsInstance(model_json['model_config']['config'], dict) self.assertIn('layers', model_json['model_config']['config']) weights_manifest = json_content['weightsManifest'] self.assertIsInstance(weights_manifest, list) # Briefly check the weights manifest. weight_shapes = dict() weight_dtypes = dict() for manifest_item in weights_manifest: for weight in manifest_item['weights']: weight_name = weight['name'] weight_shapes[weight_name] = weight['shape'] weight_dtypes[weight_name] = weight['dtype'] self.assertEqual( sorted(list(weight_shapes.keys())), sorted([ 'MergedDenseForCLI1/kernel', 'MergedDenseForCLI1/bias', 'MergedDenseForCLI2/kernel' ])) self.assertEqual(weight_shapes['MergedDenseForCLI1/kernel'], [3, 4]) self.assertEqual(weight_shapes['MergedDenseForCLI1/bias'], [4]) self.assertEqual(weight_shapes['MergedDenseForCLI2/kernel'], [4, 2]) self.assertEqual(weight_dtypes['MergedDenseForCLI1/kernel'], 'float32') self.assertEqual(weight_dtypes['MergedDenseForCLI1/bias'], 'float32') self.assertEqual(weight_dtypes['MergedDenseForCLI2/kernel'], 'float32') # Verify that there are two weight groups due to the optional flag # --split_weights_by_layer behavior. The model is a small one. None of # the layers should have weight sizes exceeding the 4-MB shard size # limit. 
self.assertEqual( 2, len(glob.glob(os.path.join(self._tmp_dir, 'group*')))) def testKerasH5ConversionWithSignatureNameErrors(self): process = subprocess.Popen( [ 'tensorflowjs_converter', '--input_format', 'keras', '--signature_name', 'bar', os.path.join(self._tmp_dir, 'foo.h5'), os.path.join(self._tmp_dir, 'output') ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, stderr = process.communicate() self.assertGreater(process.returncode, 0) self.assertIn( b'The --signature_name flag is applicable only to', tf.compat.as_bytes(stderr)) def testConvertTFSavedModelV1WithCommandLineWorks(self): output_dir = os.path.join(self._tmp_dir) process = subprocess.Popen([ 'tensorflowjs_converter', '--input_format', 'tf_saved_model', '--output_format', 'tfjs_graph_model', self.tf_saved_model_v1_dir, output_dir ]) process.communicate() self.assertEqual(0, process.returncode) weights = [{ 'paths': ['group1-shard1of1.bin'], 'weights': [{'dtype': 'float32', 'name': 'w', 'shape': [2, 2]}]}] # Load the saved weights as a JSON string. output_json = json.load( open(os.path.join(output_dir, 'model.json'), 'rt')) self.assertEqual(output_json['weightsManifest'], weights) # Check the content of the output directory. self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*'))) def testConvertTFHubModuleWithCommandLineWorks(self): output_dir = os.path.join(self._tmp_dir) process = subprocess.Popen([ 'tensorflowjs_converter', '--input_format', 'tf_hub', self.tf_hub_module_dir, output_dir ]) process.communicate() self.assertEqual(0, process.returncode) weights = [{ 'paths': ['group1-shard1of1.bin'], 'weights': [{ 'shape': [2], 'name': 'module/Variable', 'dtype': 'float32' }] }] # Load the saved weights as a JSON string. output_json = json.load( open(os.path.join(output_dir, 'model.json'), 'rt')) self.assertEqual(output_json['weightsManifest'], weights) # Check the content of the output directory. 
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*'))) def testConvertTFSavedModelWithCommandLineWorks(self): output_dir = os.path.join(self._tmp_dir) process = subprocess.Popen([ 'tensorflowjs_converter', '--input_format', 'tf_saved_model', '--output_format', 'tfjs_graph_model', self.tf_saved_model_dir, output_dir ]) process.communicate() self.assertEqual(0, process.returncode) weights = [{ 'paths': ['group1-shard1of1.bin'], 'weights': [{ 'dtype': 'float32', 'shape': [], 'name': 'StatefulPartitionedCall/mul' }] }] # Load the saved weights as a JSON string. output_json = json.load( open(os.path.join(output_dir, 'model.json'), 'rt'))
= self.cls p = P(BASE, 'linkB') paths = set(p.iterdir()) expected = { P(BASE, 'linkB', q) for q in ['fileB', 'linkD'] } self.assertEqual(paths, expected) def test_iterdir_nodir(self): # __iter__ on something that is not a directory p = self.cls(BASE, 'fileA') with self.assertRaises(OSError) as cm: next(p.iterdir()) # ENOENT or EINVAL under Windows, ENOTDIR otherwise # (see issue #12802) self.assertIn(cm.exception.errno, (errno.ENOTDIR, errno.ENOENT, errno.EINVAL)) def test_glob_common(self): def _check(glob, expected): self.assertEqual(set(glob), { P(BASE, q) for q in expected }) P = self.cls p = P(BASE) it = p.glob("fileA") self.assertIsInstance(it, collections.abc.Iterator) _check(it, ["fileA"]) _check(p.glob("fileB"), []) _check(p.glob("dir*/file*"), ["dirB/fileB", "dirC/fileC"]) if not support.can_symlink(): _check(p.glob("*A"), ['dirA', 'fileA']) else: _check(p.glob("*A"), ['dirA', 'fileA', 'linkA']) if not support.can_symlink(): _check(p.glob("*B/*"), ['dirB/fileB']) else: _check(p.glob("*B/*"), ['dirB/fileB', 'dirB/linkD', 'linkB/fileB', 'linkB/linkD']) if not support.can_symlink(): _check(p.glob("*/fileB"), ['dirB/fileB']) else: _check(p.glob("*/fileB"), ['dirB/fileB', 'linkB/fileB']) def test_rglob_common(self): def _check(glob, expected): self.assertEqual(set(glob), { P(BASE, q) for q in expected }) P = self.cls p = P(BASE) it = p.rglob("fileA") self.assertIsInstance(it, collections.abc.Iterator) _check(it, ["fileA"]) _check(p.rglob("fileB"), ["dirB/fileB"]) _check(p.rglob("*/fileA"), []) if not support.can_symlink(): _check(p.rglob("*/fileB"), ["dirB/fileB"]) else: _check(p.rglob("*/fileB"), ["dirB/fileB", "dirB/linkD/fileB", "linkB/fileB", "dirA/linkC/fileB"]) _check(p.rglob("file*"), ["fileA", "dirB/fileB", "dirC/fileC", "dirC/dirD/fileD"]) p = P(BASE, "dirC") _check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"]) _check(p.rglob("*/*"), ["dirC/dirD/fileD"]) @support.skip_unless_symlink def test_rglob_symlink_loop(self): # Don't get fooled by 
symlink loops (Issue #26012) P = self.cls p = P(BASE) given = set(p.rglob('*')) expect = {'brokenLink', 'dirA', 'dirA/linkC', 'dirB', 'dirB/fileB', 'dirB/linkD', 'dirC', 'dirC/dirD', 'dirC/dirD/fileD', 'dirC/fileC', 'dirE', 'fileA', 'linkA', 'linkB', 'brokenLinkLoop', } self.assertEqual(given, {p / x for x in expect}) def test_glob_dotdot(self): # ".." is not special in globs P = self.cls p = P(BASE) self.assertEqual(set(p.glob("..")), { P(BASE, "..") }) self.assertEqual(set(p.glob("dirA/../file*")), { P(BASE, "dirA/../fileA") }) self.assertEqual(set(p.glob("../xyzzy")), set()) def _check_resolve(self, p, expected, strict=True): q = p.resolve(strict) self.assertEqual(q, expected) # this can be used to check both relative and absolute resolutions _check_resolve_relative = _check_resolve_absolute = _check_resolve @support.skip_unless_symlink def test_resolve_common(self): P = self.cls p = P(BASE, 'foo') with self.assertRaises(OSError) as cm: p.resolve(strict=True) self.assertEqual(cm.exception.errno, errno.ENOENT) # Non-strict self.assertEqualNormCase(str(p.resolve(strict=False)), os.path.join(BASE, 'foo')) p = P(BASE, 'foo', 'in', 'spam') self.assertEqualNormCase(str(p.resolve(strict=False)), os.path.join(BASE, 'foo', 'in', 'spam')) p = P(BASE, '..', 'foo', 'in', 'spam') self.assertEqualNormCase(str(p.resolve(strict=False)), os.path.abspath(os.path.join('foo', 'in', 'spam'))) # These are all relative symlinks. 
p = P(BASE, 'dirB', 'fileB') self._check_resolve_relative(p, p) p = P(BASE, 'linkA') self._check_resolve_relative(p, P(BASE, 'fileA')) p = P(BASE, 'dirA', 'linkC', 'fileB') self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB')) p = P(BASE, 'dirB', 'linkD', 'fileB') self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB')) # Non-strict p = P(BASE, 'dirA', 'linkC', 'fileB', 'foo', 'in', 'spam') self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB', 'foo', 'in', 'spam'), False) p = P(BASE, 'dirA', 'linkC', '..', 'foo', 'in', 'spam') if os.name == 'nt': # In Windows, if linkY points to dirB, 'dirA\linkY\..' # resolves to 'dirA' without resolving linkY first. self._check_resolve_relative(p, P(BASE, 'dirA', 'foo', 'in', 'spam'), False) else: # In Posix, if linkY points to dirB, 'dirA/linkY/..' # resolves to 'dirB/..' first before resolving to parent of dirB. self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False) # Now create absolute symlinks d = support._longpath(tempfile.mkdtemp(suffix='-dirD', dir=os.getcwd())) self.addCleanup(support.rmtree, d) os.symlink(os.path.join(d), join('dirA', 'linkX')) os.symlink(join('dirB'), os.path.join(d, 'linkY')) p = P(BASE, 'dirA', 'linkX', 'linkY', 'fileB') self._check_resolve_absolute(p, P(BASE, 'dirB', 'fileB')) # Non-strict p = P(BASE, 'dirA', 'linkX', 'linkY', 'foo', 'in', 'spam') self._check_resolve_relative(p, P(BASE, 'dirB', 'foo', 'in', 'spam'), False) p = P(BASE, 'dirA', 'linkX', 'linkY', '..', 'foo', 'in', 'spam') if os.name == 'nt': # In Windows, if linkY points to dirB, 'dirA\linkY\..' # resolves to 'dirA' without resolving linkY first. self._check_resolve_relative(p, P(d, 'foo', 'in', 'spam'), False) else: # In Posix, if linkY points to dirB, 'dirA/linkY/..' # resolves to 'dirB/..' first before resolving to parent of dirB. 
self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False) @support.skip_unless_symlink def test_resolve_dot(self): # See https://bitbucket.org/pitrou/pathlib/issue/9/pathresolve-fails-on-complex-symlinks p = self.cls(BASE) self.dirlink('.', join('0')) self.dirlink(os.path.join('0', '0'), join('1')) self.dirlink(os.path.join('1', '1'), join('2')) q = p / '2' self.assertEqual(q.resolve(strict=True), p) r = q / '3' / '4' self.assertRaises(FileNotFoundError, r.resolve, strict=True) # Non-strict self.assertEqual(r.resolve(strict=False), p / '3' / '4') def test_with(self): p = self.cls(BASE) it = p.iterdir() it2 = p.iterdir() next(it2) with p: pass # I/O operation on closed path self.assertRaises(ValueError, next, it) self.assertRaises(ValueError, next, it2) self.assertRaises(ValueError, p.open) self.assertRaises(ValueError, p.resolve) self.assertRaises(ValueError, p.absolute) self.assertRaises(ValueError, p.__enter__) def test_chmod(self): p = self.cls(BASE) / 'fileA' mode = p.stat().st_mode # Clear writable bit new_mode = mode & ~0o222 p.chmod(new_mode) self.assertEqual(p.stat().st_mode, new_mode) # Set writable bit new_mode = mode | 0o222 p.chmod(new_mode) self.assertEqual(p.stat().st_mode, new_mode) # XXX also need a test for lchmod def test_stat(self): p = self.cls(BASE) / 'fileA' st = p.stat() self.assertEqual(p.stat(), st) # Change file mode by flipping write bit p.chmod(st.st_mode ^ 0o222) self.addCleanup(p.chmod, st.st_mode) self.assertNotEqual(p.stat(), st) @support.skip_unless_symlink def test_lstat(self): p = self.cls(BASE)/ 'linkA' st = p.stat() self.assertNotEqual(st, p.lstat()) def test_lstat_nosymlink(self): p = self.cls(BASE) / 'fileA' st = p.stat() self.assertEqual(st, p.lstat()) @unittest.skipUnless(pwd, "the pwd module is needed for this test") def test_owner(self): p = self.cls(BASE) / 'fileA' uid = p.stat().st_uid try: name = pwd.getpwuid(uid).pw_name except KeyError: self.skipTest( "user %d doesn't have an entry in the system database" % 
uid) self.assertEqual(name, p.owner()) @unittest.skipUnless(grp, "the grp module is needed for this test") def test_group(self): p = self.cls(BASE) / 'fileA' gid = p.stat().st_gid try: name = grp.getgrgid(gid).gr_name except KeyError: self.skipTest( "group %d doesn't have an entry in the system database" % gid) self.assertEqual(name, p.group()) def test_unlink(self): p = self.cls(BASE) / 'fileA' p.unlink() self.assertFileNotFound(p.stat) self.assertFileNotFound(p.unlink) def test_rmdir(self): p = self.cls(BASE) / 'dirA' for q in p.iterdir(): q.unlink() p.rmdir() self.assertFileNotFound(p.stat) self.assertFileNotFound(p.unlink) def test_rename(self): P = self.cls(BASE) p = P / 'fileA' size = p.stat().st_size # Renaming to another path q = P / 'dirA' / 'fileAA' p.rename(q) self.assertEqual(q.stat().st_size, size) self.assertFileNotFound(p.stat) # Renaming to a str of a relative path r = rel_join('fileAAA') q.rename(r) self.assertEqual(os.stat(r).st_size, size) self.assertFileNotFound(q.stat) def test_replace(self): P = self.cls(BASE) p = P / 'fileA' size = p.stat().st_size # Replacing a non-existing path q = P / 'dirA' / 'fileAA' p.replace(q) self.assertEqual(q.stat().st_size, size) self.assertFileNotFound(p.stat) # Replacing another (existing) path r = rel_join('dirB', 'fileB') q.replace(r) self.assertEqual(os.stat(r).st_size, size) self.assertFileNotFound(q.stat) def test_touch_common(self): P = self.cls(BASE) p = P / 'newfileA' self.assertFalse(p.exists()) p.touch() self.assertTrue(p.exists()) st = p.stat() old_mtime = st.st_mtime old_mtime_ns = st.st_mtime_ns # Rewind the mtime sufficiently far in the past to work around # filesystem-specific timestamp granularity. 
os.utime(str(p), (old_mtime - 10, old_mtime - 10)) # The file mtime should be refreshed by calling touch() again p.touch() st = p.stat() self.assertGreaterEqual(st.st_mtime_ns, old_mtime_ns) self.assertGreaterEqual(st.st_mtime, old_mtime) # Now with exist_ok=False p = P / 'newfileB' self.assertFalse(p.exists()) p.touch(mode=0o700, exist_ok=False) self.assertTrue(p.exists()) self.assertRaises(OSError, p.touch, exist_ok=False) def test_touch_nochange(self): P = self.cls(BASE) p = P / 'fileA' p.touch() with p.open('rb') as f: self.assertEqual(f.read().strip(), b"this is file A") def test_mkdir(self): P = self.cls(BASE) p = P / 'newdirA' self.assertFalse(p.exists()) p.mkdir() self.assertTrue(p.exists()) self.assertTrue(p.is_dir()) with self.assertRaises(OSError) as cm: p.mkdir() self.assertEqual(cm.exception.errno, errno.EEXIST) def test_mkdir_parents(self): # Creating a chain of directories p = self.cls(BASE, 'newdirB', 'newdirC') self.assertFalse(p.exists()) with self.assertRaises(OSError) as cm: p.mkdir() self.assertEqual(cm.exception.errno, errno.ENOENT) p.mkdir(parents=True) self.assertTrue(p.exists()) self.assertTrue(p.is_dir()) with self.assertRaises(OSError) as cm: p.mkdir(parents=True) self.assertEqual(cm.exception.errno, errno.EEXIST) # test `mode` arg mode = stat.S_IMODE(p.stat().st_mode) # default mode p = self.cls(BASE, 'newdirD', 'newdirE') p.mkdir(0o555, parents=True) self.assertTrue(p.exists()) self.assertTrue(p.is_dir()) if os.name != 'nt': # the directory's permissions follow the mode argument self.assertEqual(stat.S_IMODE(p.stat().st_mode), 0o7555 & mode) # the parent's permissions follow the default process settings self.assertEqual(stat.S_IMODE(p.parent.stat().st_mode), mode) def test_mkdir_exist_ok(self): p = self.cls(BASE, 'dirB') st_ctime_first = p.stat().st_ctime self.assertTrue(p.exists()) self.assertTrue(p.is_dir()) with self.assertRaises(FileExistsError) as cm: p.mkdir() self.assertEqual(cm.exception.errno, errno.EEXIST) 
p.mkdir(exist_ok=True) self.assertTrue(p.exists()) self.assertEqual(p.stat().st_ctime, st_ctime_first) def test_mkdir_exist_ok_with_parent(self): p = self.cls(BASE, 'dirC') self.assertTrue(p.exists()) with self.assertRaises(FileExistsError) as cm: p.mkdir() self.assertEqual(cm.exception.errno, errno.EEXIST) p = p / 'newdirC' p.mkdir(parents=True) st_ctime_first = p.stat().st_ctime self.assertTrue(p.exists()) with self.assertRaises(FileExistsError) as cm: p.mkdir(parents=True) self.assertEqual(cm.exception.errno, errno.EEXIST) p.mkdir(parents=True, exist_ok=True) self.assertTrue(p.exists()) self.assertEqual(p.stat().st_ctime, st_ctime_first) def test_mkdir_exist_ok_root(self): # Issue #25803: A drive root could raise PermissionError on Windows self.cls('/').resolve().mkdir(exist_ok=True) self.cls('/').resolve().mkdir(parents=True, exist_ok=True) @only_nt # XXX: not sure how to test this on POSIX def test_mkdir_with_unknown_drive(self): for d in 'ZYXWVUTSRQPONMLKJIHGFEDCBA': p = self.cls(d + ':\\') if not p.is_dir(): break else: self.skipTest("cannot find a drive that doesn't exist") with self.assertRaises(OSError): (p / 'child' / 'path').mkdir(parents=True) def test_mkdir_with_child_file(self): p = self.cls(BASE, 'dirB', 'fileB') self.assertTrue(p.exists()) # An exception is raised when the last path component is an existing # regular file, regardless of whether exist_ok is true or not. with self.assertRaises(FileExistsError) as cm: p.mkdir(parents=True) self.assertEqual(cm.exception.errno, errno.EEXIST) with self.assertRaises(FileExistsError) as cm: p.mkdir(parents=True, exist_ok=True) self.assertEqual(cm.exception.errno, errno.EEXIST) def test_mkdir_no_parents_file(self):
self.find_node_property(input_key, self.mat_property_dict) if property_type == "Value": # Check if Info is a Hex Color if isinstance(property_info, str): property_info = self.convert_color( property_info, shader_node ) if input_key == "Normal Map: Value": if isinstance(property_info, list): property_info = 1 shader_node.inputs[input_key].default_value = property_info if property_type == "Texture": if os.path.exists(property_info): self.check_map_type(property_key) tex_image_node = mat_nodes.new( type="ShaderNodeTexImage" ) self.create_texture_input(property_info, tex_image_node) tex_node_output = tex_image_node.outputs["Color"] mat_links.new( tex_node_output, shader_node.inputs[input_key] ) # handle Tile Horizontal_Tiles = self.mat_property_dict.get("Horizontal Tiles") Vertical_Tiles = self.mat_property_dict.get("Vertical Tiles") if Horizontal_Tiles["Value"] > 1 or Vertical_Tiles["Value"] > 1: # create Mapping node and Coord node mapping_node = mat_nodes.new("ShaderNodeMapping") coord_node = mat_nodes.new("ShaderNodeTexCoord") # set value # x mapping_node.inputs["Scale"].default_value[0] = Horizontal_Tiles["Value"] # y mapping_node.inputs["Scale"].default_value[1] = Vertical_Tiles["Value"] # link them mat_links.new(coord_node.outputs["UV"], mapping_node.inputs["Vector"]) # link mapping_node to all texture node for node in mat_nodes: if node.bl_idname == "ShaderNodeTexImage": mat_links.new(mapping_node.outputs["Vector"], node.inputs["Vector"]) # Set Alpha Modes self.check_refract() self.set_eevee_refract(mat) self.set_eevee_alpha(mat) # Set the cycles displacement method if node_group == "IrayUberSkin": mat_links.new( shader_node.outputs["Displacement"], out_node_cy.inputs["Displacement"], ) mat.cycles.displacement_method = "BOTH" else: mat.cycles.displacement_method = "BUMP" if mat_nodes is not None: NodeArrange.toNodeArrange(mat_nodes) #set a texture node def set_tex_node(self, tex_path, property_name, mat_nodes, mat_links, shader_node, input_key): if 
os.path.exists(tex_path): # this will set is_Diffuse or is_Alpha, which will be used in create_texture_input self.check_map_type(property_name) tex_image_node = mat_nodes.new("ShaderNodeTexImage") self.create_texture_input(tex_path, tex_image_node) tex_node_output = tex_image_node.outputs["Color"] mat_links.new( tex_node_output, shader_node.inputs[input_key] ) #set value or add texture node def set_value_or_tex(self, property_name, mat_nodes, mat_links, shader_node, input_key): property = self.mat_property_dict.get(property_name) if property is None: print("can not find: " + property_name) return if len(property["Texture"])>0: tex_path = property["Texture"] self.set_tex_node(tex_path, property_name, mat_nodes, mat_links, shader_node, input_key) elif property["Data Type"] == "Double": shader_node.inputs[input_key].default_value = property["Value"] #set color or add texture node def set_color_or_tex(self, property_name, mat_nodes, mat_links, shader_node, input_key): property = self.mat_property_dict.get(property_name) if property is None: print("can not find: " + property_name) return if property["Data Type"] == "Color": color = self.daz_color_to_rgb(property["Value"]) shader_node.inputs[input_key].default_value = (color[0], color[1], color[2], color[3]) elif len(property["Texture"])>0: tex_path = property["Texture"] self.set_tex_node(tex_path, "Diffuse", mat_nodes, mat_links, shader_node, input_key) # use Principled BSDF shader def setup_principled_materials(self, obj): for mat_slot in obj.material_slots: mat = mat_slot.material mat_name = mat.name obj_name = obj.name.replace(".Shape", "") obj_name = obj_name.split(".")[0] if mat is None: # Get or create a new material when slot is missing material mat = bpy.data.materials.get(mat_slot.name) or bpy.data.materials.new( name=mat_slot.name ) mat_slot.material = mat if obj_name not in self.mat_data_dict.keys(): continue if mat_name not in self.mat_data_dict[obj_name].keys(): mat_name = mat.name.split(".")[0] if mat_name 
not in self.mat_data_dict[obj_name].keys(): continue mat_data = self.mat_data_dict[obj_name][mat_name] self.mat_property_dict = self.get_mat_properties(mat_data) # Set Custom Properties for key in mat_data: if not key == "Properties": mat[key] = mat_data[key] # Update Name new_name = mat["Asset Label"] + "_" + mat["Material Name"] if bpy.context.window_manager.combine_materials: # To Deal with a duplicate being converted first. if new_name in bpy.data.materials: mat_slot.material = bpy.data.materials[new_name] bpy.data.materials.remove(mat) continue mat.name = new_name mat_name = mat.name # To Deal with duplications if self.optimize_materials(mat_slot): continue mat.use_nodes = True mat_nodes = mat.node_tree.nodes mat_links = mat.node_tree.links # change viewport display mat.metallic = 0 # set viewport display color # set eyeball to black if "Irises" in mat_name: mat.diffuse_color = (0,0,0,1) elif "Pupils" in mat_name: mat.diffuse_color = (0,0,0,1) elif "Cornea" in mat_name: mat.diffuse_color = (0,0,0,0) elif "_Lips" in mat_name: mat.diffuse_color = (1, 0.8, 0.8, 1) elif "Eyelashes" in mat_name: mat.diffuse_color = (0,0,0,0.5) # map iray shader to Principled BSDF shader from here # find Principled BSDF shader node shader_node = mat_nodes.get("Principled BSDF") # find Principled BSDF by bl_idname if shader_node is None: for node in mat_nodes: if node.bl_idname == "ShaderNodeBsdfPrincipled": shader_node = node # if still no shader node, then create one if shader_node is None: shader_node = mat_nodes.new("ShaderNodeBsdfPrincipled") if shader_node is None: print("can not find Principled BSDF node for mat: " + mat.name) # do not create a new Principled BSDF, so we can see what may be wrong from shader editor # make it easier to fix bugs continue # change subsurface_method to fixed. 
The new automatically one is not good and has bugs shader_node.subsurface_method = 'RANDOM_WALK_FIXED_RADIUS' # show info for debug # pprint.pprint(self.mat_property_dict) # print(" ") # find "Normal Map" node normal_node = mat_nodes.get("Normal Map") if normal_node is not None: # change label "Normal/Map" to "Normal Map" # there is no such name as "Normal/Map" in blender normal_node.label = "Normal Map" # find normal node by bl_idname if normal_node is None: for node in mat_nodes: if node.bl_idname == "ShaderNodeNormalMap": normal_node = node # if still no normal node, then create one if normal_node is None: normal_node = mat_nodes.new("ShaderNodeNormalMap") # map iray material to blender # iray mat's Properties looks like this: # { # "Name": "Diffuse Weight", # "Label": "Diffuse Weight", # "Value": 1, # "Data Type": "Double", # "Texture": "" # }, # { # "Name": "Diffuse Color", # "Label": "Base Color", # "Value": "#ffffff", # "Data Type": "Color", # "Texture": "texture_path.jpg" # }, # { # "Name": "Diffuse Overlay Color", # "Label": "Diffuse Overlay Color", # "Value": "#c0c0c0", # "Data Type": "Color", # "Texture": "" # }, for input_key in shader_node.inputs.keys(): # reset property = None tex_path = "" if input_key == "Base Color": # an Image Texture node should already be linked to shader_node property = self.mat_property_dict.get("Diffuse Color") if property["Texture"] == "": # set color value color = self.daz_color_to_rgb(property["Value"]) shader_node.inputs[input_key].default_value = (color[0], color[1], color[2], color[3]) elif input_key == "Subsurface": property = self.mat_property_dict.get("Translucency Weight") if property is None: print("can not find: " + "Translucency Weight") continue if property["Data Type"] == "Double": shader_node.inputs[input_key].default_value = property["Value"] * Global.sss_rate elif len(property["Texture"])>0: tex_path = property["Texture"] self.set_tex_node(tex_path, "Translucency Weight", mat_nodes, mat_links, shader_node, 
input_key) elif input_key == "Subsurface Radius": # always use this value shader_node.inputs[input_key].default_value[0] = 0.2 shader_node.inputs[input_key].default_value[1] = 0.2 shader_node.inputs[input_key].default_value[2] = 0.2 elif input_key == "Subsurface Color": Translucency_Color = self.mat_property_dict.get("Translucency Color") # do not use Translucency Color's texture, always use the color value color = self.daz_color_to_rgb(Translucency_Color["Value"]) # Base_Color_Effect values: Scatter Only(0), Scatter & Transmit(1),Scatter & Transmit Intensity(2) Base_Color_Effect = self.mat_property_dict.get("Base Color Effect") if Base_Color_Effect is not None: if Base_Color_Effect["Value"] != 0: SSS_Reflectance_Tint = self.mat_property_dict.get("SSS Reflectance Tint") tint_color = self.daz_color_to_rgb(SSS_Reflectance_Tint["Value"]) #r color[0] = color[0] * tint_color[0] #g color[1] = color[1] * tint_color[1] #b color[2] = color[2] * tint_color[2] #alpha is always 1, so no need to handle # set blender's color shader_node.inputs[input_key].default_value = (color[0], color[1], color[2], color[3]) elif input_key == "Subsurface IOR": # useless pass elif input_key == "Subsurface Anisotropy": # useless pass elif input_key == "Metallic": self.set_value_or_tex("Metallic Weight", mat_nodes, mat_links, shader_node, input_key) elif input_key == "Specular": #need to merge Glossy and Dual Lobe Specular Dual_Lobe_Specular_Weight = self.mat_property_dict.get("Dual Lobe Specular Weight") Dual_Lobe_Specular_Reflectivity = self.mat_property_dict.get("Dual Lobe Specular Reflectivity") Specular_Lobe_1_Roughness = self.mat_property_dict.get("Specular Lobe 1 Roughness") Specular_Lobe_2_Roughness = self.mat_property_dict.get("Specular Lobe 2 Roughness") Dual_Lobe_Specular_Ratio = self.mat_property_dict.get("Dual Lobe Specular Ratio") Glossy_Layered_Weight = self.mat_property_dict.get("Glossy Layered Weight") Glossy_Weight = self.mat_property_dict.get("Glossy Weight") if 
Glossy_Weight["Value"] > 0: Glossy_Layered_Weight = Glossy_Weight Glossy_Reflectivity = self.mat_property_dict.get("Glossy Reflectivity") Glossy_Roughness = self.mat_property_dict.get("Glossy Roughness") # calculate dual lobe specular's final roughness r1 = Specular_Lobe_1_Roughness["Value"] r2 = Specular_Lobe_2_Roughness["Value"] ratio = Dual_Lobe_Specular_Ratio["Value"] # (1- rated_r) = ((1-r2) * (1-ratio) + (1-r1) * ratio) rated_r = 1 - (((1-r2) * (1-ratio)) + ((1-r1) * ratio)) if rated_r > 1: rated_r = 1 elif rated_r < 0: rated_r = 0 if Dual_Lobe_Specular_Weight["Value"] > 0 and len(Dual_Lobe_Specular_Weight["Texture"])>0: #use Dual_Lobe_Specular_Weight texture self.set_tex_node(Dual_Lobe_Specular_Weight["Texture"], "Dual Lobe Specular Weight", mat_nodes, mat_links, shader_node, input_key) elif Dual_Lobe_Specular_Weight["Value"] > 0 and len(Dual_Lobe_Specular_Reflectivity["Texture"])>0: #use Dual_Lobe_Specular_Reflectivity texture self.set_tex_node(Dual_Lobe_Specular_Reflectivity["Texture"], "Dual Lobe Specular Reflectivity", mat_nodes, mat_links, shader_node, input_key) elif Glossy_Layered_Weight["Value"] > 0 and len(Glossy_Layered_Weight["Texture"])>0: #use Glossy_Layered_Weight texture self.set_tex_node(Glossy_Layered_Weight["Texture"], "Glossy Layered Weight", mat_nodes, mat_links, shader_node, input_key) elif Glossy_Layered_Weight["Value"] > 0 and len(Glossy_Reflectivity["Texture"])>0: #use Glossy_Reflectivity texture self.set_tex_node(Glossy_Reflectivity["Texture"], "Glossy Reflectivity", mat_nodes, mat_links, shader_node, input_key) elif Dual_Lobe_Specular_Weight["Value"] > 0 and Glossy_Layered_Weight["Value"] == 0: #use Dual_Lobe_Specular value shader_node.inputs[input_key].default_value = Dual_Lobe_Specular_Reflectivity["Value"] * Dual_Lobe_Specular_Weight["Value"] elif Dual_Lobe_Specular_Weight["Value"] == 0 and Glossy_Layered_Weight["Value"] > 0: #use Glossy value shader_node.inputs[input_key].default_value = Glossy_Reflectivity["Value"] * 
Glossy_Layered_Weight["Value"] elif Dual_Lobe_Specular_Weight["Value"] > 0 and Glossy_Layered_Weight["Value"] > 0: # merge value spec_value = Dual_Lobe_Specular_Reflectivity["Value"] * Dual_Lobe_Specular_Weight["Value"] * (1-rated_r) glossy_value = Glossy_Reflectivity["Value"] * Glossy_Layered_Weight["Value"] * (1-Glossy_Roughness["Value"]) # use the higher one value = Glossy_Reflectivity["Value"] * Glossy_Layered_Weight["Value"] if spec_value > glossy_value: value = Dual_Lobe_Specular_Reflectivity["Value"] * Dual_Lobe_Specular_Weight["Value"] #
for bom_meta in allcurrentbommeta: bom_meta_dict[bom_meta['name']] = bom_meta bomtablenames = this_database.get_table_names() new_meta = [] for tablename in bomtablenames: if tablename == 'metadata': continue # Check to see if metadata already exist. We need to maintain activity status and notes if tablename in bom_meta_dict: this_meta = bom_meta_dict[tablename] else: # Initialize meta this_meta = {'name':tablename} # Get metadata for each table itemcount = this_database.get_table_size(tablename) this_meta['itemcount'] = itemcount # Calc some other data bomitems = this_database.read_table(tablename) cost = 0 price = 0 if bomitems: for item in bomitems: if item['totalcost']: # Pick up errors for problems converting float try: cost += float(item['totalcost']) except: pass if item['totalprice']: try: price += float(item['totalprice']) except: pass this_meta['price'] = price this_meta['cost'] = cost this_meta['profit'] = price - cost new_meta.append(this_meta) if new_meta: this_database.create_table('metadata', schema=tableitems.bommetaitems_schema) this_database.insert('metadata', new_meta) def backfillbomfromstock(bomitems, recalc=True): from iiutilities import dblib newbomitems = [] for bomitem in bomitems: stockdbresult = dblib.readonedbrow(sysvars.dirs.dbs.stock, 'stock', condition="partid='" + bomitem['partid'] + "'") if stockdbresult: stockitem = stockdbresult[0] else: print(' ITEM "' + bomitem['partid'] + '" NOT FOUND ') stockitem = {} # Backfill all properties that do not exist for property in tableitems.bompart_schema.columns(): if property in stockitem and property not in bomitem: bomitem[property] = stockitem[property] if recalc: recalcitem(bomitem) newbomitems.append(bomitem) return newbomitems # We can feed this either a BOMname or a raw BOM dict array def calcbomprice(d, output={'message': ''}, recalc=True, precision=2): from iiutilities import dblib from iiutilities.datalib import setprecision # Use the already written recalc routine here if recalc and 
'bomname' in d: # This will reload all margin data and do multipliers, so no need to # futz with multiplication elsewhere recalcpartdata(**{'bomname': d['bomname']}) bomresults = {'cost': 0, 'price': 0} if 'bomdictarray' in d: # directly calculate bom price output['message'] == 'bomdictarray keyword found. ' bomdictarray = d['bomdictarray'] # This will not do any recalc. Below, we will use what we find if we find it, and retrieve what we do not pass elif 'bomname' in d: output['message'] += 'bomname keyword found. ' bomdictarray = sysvars.dbs.boms.read_table(d['bomname']) else: return None calcvalues = {} calcdicts = sysvars.dbs.system.read_table('calcs') for calcdict in calcdicts: calcvalues[calcdict['item']] = calcdict['value'] bomresults[calcdict['item']] = calcdict['value'] totalcost = 0 totalprice = 0 for bomitem in bomdictarray: # Use existing results to calculate totalprice, totalcost, and # Prices for each part category # If cost, margin, etc. are not found, retrieve them from stockpartdata. 
This will allow us to # feed a BOM dict array in and get answers out even without if 'totalcost' not in bomitem: # Now we have to recalc bomitem = backfillbomfromstock([bomitem])[0] try: itemcost = float(bomitem['totalcost']) except: itemcost = 0 itemtype = bomitem['type'] if itemtype + 'cost' not in bomresults: bomresults[itemtype + 'cost'] = 0 bomresults[itemtype + 'cost'] += itemcost totalcost += itemcost if itemtype + 'margin' in bomresults: margin = float(calcvalues[itemtype + 'margin']) bomresults[itemtype + 'margin'] = margin if itemtype + 'price' not in bomresults: bomresults[itemtype + 'price'] = 0 itemprice = float(bomitem['totalprice']) bomresults[itemtype + 'price'] += itemprice bomresults[itemtype + 'profit'] = bomresults[itemtype + 'price'] - bomresults[itemtype + 'cost'] bomresults['price'] += itemprice totalprice += itemprice bomresults['totalcost'] = totalcost bomresults['totalprice'] = totalprice bomresults['totalprofit'] = totalprice - totalcost try: bomresults['totalmargin'] = totalprice / totalcost - 1 except: bomresults['totalmargin'] = 0 try: bomresults['totalmarginjustparts'] = totalprice / bomresults['partscost'] - 1 except: bomresults['totalmargin'] = 0 for name, value in bomresults.items(): # print(name,value) bomresults[name] = setprecision(value, precision) output['data'] = bomresults return output def deletepartsfrombom(d, output={'message': ''}): from iiutilities import dblib, datalib database = sysvars.dirs.dbs.boms # In here we should test to see if the request is valid. First, let us make sure we have all the required # fields we need: # partid, description, manufacturer, manufacturerpart if 'partids' in d and 'bomname' in d: if not isinstance(d['partids'], list): d['partids'] = [d['partids']] else: output['message'] += 'No partids and/or bomname found in copy request dictionary. ' return output output['message'] += 'Bomname ' + d['bomname'] output['message'] += 'Partids: ' + str(d['partids']) + '. 
' querylist = [] for id in d['partids']: output['message'] += 'Deleting part ' + id + '. ' querylist.append(dblib.makedeletesinglevaluequery(d['bomname'], "partid='" + id + "'")) dblib.sqlitemultquery(database, querylist) # Recalc bom recalcpartdata(bomname=d['bomname']) makebommetadata() # Update metadata condition = "name='" + d['bomname'] + "'" dblib.setsinglevalue(database, 'metadata', 'modified', datalib.gettimestring(), condition=condition) return output def deleteboms(d, output={'message': ''}): from iiutilities import dblib if 'bomname' in d: output['message'] += 'Single bomname found. ' bomnames = [d['bomname']] elif 'bomnames' in d: output['message'] += 'Bomnames keyword found. ' if not d['bomnames']: output['message'] += 'Empty bomnames value. ' return output else: bomnames = d['bomnames'] for bomname in bomnames: output['message'] += 'Deleting bom with name' + bomname + '. ' dblib.sqlitedroptable(sysvars.dirs.dbs.boms, bomname) return output def copybom(d, output={'message': ''}): from iiutilities import dblib, datalib # In here we should test to see if the request is valid. First, let us make sure we have all the required # fields we need: # partid, description, manufacturer, manufacturerpart database = sysvars.dirs.dbs.boms if 'bomname' in d: pass else: output['message'] += 'No bomname found in copy request dictionary. ' return output bomnames = dblib.gettablenames(database) try: bomnames.index(d['bomname']) except: output['message'] += 'Bomname ' + d['bomname'] + ' not found in list of tables. ' return output else: if 'newbomname' in d: newbomname = d['newbomname'] else: newbomname = d['bomname'] + '_copy' try: dblib.sqliteduplicatetable(database, d['bomname'], newbomname) except: output['message'] += "Error copying BOM. " else: output['message'] += 'BOM copy appears to have been successful. 
' # Make a new bommeta entry so we can edit it makebommetadata() # Update metadata condition = "name='" + d['bomname'] + "'" dblib.setsinglevalue(database, 'metadata', 'modifieddate', datalib.gettimestring(), condition) return output def addeditbom(d, output={'message': ''}): from iiutilities import dblib, datalib settings = {'database':sysvars.dirs.dbs.boms} settings.update(d) boms_database = dblib.sqliteDatabase(settings['database']) # In here we should test to see if the request is valid. First, let us make sure we have all the required # fields we need: # partid, description, manufacturer, manufacturerpart if 'name' not in settings['bomdata']: output['message'] += 'No bomname found in edit request dictionary. ' return output # If we are modifying the partid of an existing part, we will first update the old part to have the new partid. # Then we will grab it as if it always had that partid. if 'originalname' in settings['bomdata']: output['message'] += 'Found original bomname. ' if settings['bomdata']['originalname'] != '' and settings['bomdata']['originalname'] != settings['bomdata']['name']: output['message'] += 'Found original bomname. Moving ' + settings['bomdata']['originalname'] + ' to ' + \ settings['bomdata']['name'] + ". " boms_database.move_table(settings['bomdata']['originalname'], settings['bomdata']['name']) # Now instead of autocreating the metadata, which would lose the existing fields, we are going to move the # metadata entry as well, then edit it and autocreate. All information should be retained. output['message'] += 'Updating metadata entry. ' dblib.setsinglevalue(settings['database'], 'metadata', 'name', settings['bomdata']['name'], "name='" + settings['bomdata']['originalname'] + "'") else: output['message'] += 'Original bomname is same as new bomname. ' # Pull the bom metadata entry and begin to update it. We're only updating name (done above) and other editable fields. # For the moment this is only the notes and status fields. 
Everything else is dynamic # First we try to pull it. If it does not exist, we have to create it and then recreate the metadata table # TODO: queue all entries where possible to speed up bomnames = dblib.gettablenames(settings['database']) if 'metadata' in bomnames: bomnames.remove('metadata') bomnames.sort() # mostrecentbomname = bomnames[-1] # print(settings['bomdata']['name']) # print(bomnames) if settings['bomdata']['name'] not in bomnames: output['message'] += 'BOM does not exist. Creating. ' boms_database.create_table(settings['bomdata']['name'], tableitems.bompart_schema) # And make a new metadata entry makebommetadata(database=boms_database) # Now update with creation data condition = "name='" + settings['bomdata']['name'] + "'" boms_database.set_single_value('metadata','creationdate', datalib.gettimestring(), condition=condition) else: output['message'] += 'Bom appears to exist. Continuing to edit. ' # Now we revise the existing entry allowedkeywords = ['notes', 'status'] for keyword in allowedkeywords: # mangledkeyword = 'bomdata[' + keyword + ']' modified = False if keyword in settings['bomdata']: modified = True condition = "name='" + settings['bomdata']['name'] + "'" output['message'] += 'keyword ' + keyword + ' found with value: ' +
get the half width from the peak deflection return spk_height, spk_width, half_width, deflection_range if not interp_factor: from ..analysis.parameters import interp_factor interp_factor = interp_factor self.avg_wf = np.nanmean(self.spk_wf, axis=0) self.wf_ts = np.arange(0, self.avg_wf.shape[0]) / sample_rate[self.format] * 1E3 # x-axis in ms if interpolate: # interpolate the waveform to increase sampling frequency from scipy import interpolate f = interpolate.interp1d(self.wf_ts, self.avg_wf) wf_ts_interp = np.arange(0, self.wf_ts[-1], ((self.wf_ts[1] - self.wf_ts[0]) * (1 / interp_factor))) assert (np.diff(wf_ts_interp)[0] * interp_factor) == np.diff(self.wf_ts)[0] avg_wf_interp = f(wf_ts_interp) # use interpolation function returned by `interp1d` # Replace the original value with interpolated ones self.wf_ts_interp = wf_ts_interp self.avg_wf_interp = avg_wf_interp spk_height, spk_width, half_width, deflection_range = _get_spk_profile(wf_ts_interp, avg_wf_interp) else: spk_height, spk_width, half_width, deflection_range = _get_spk_profile(self.wf_ts, self.avg_wf) self.spk_height = round(spk_height, 3) # in microvolts self.spk_width = round(spk_width, 3) # in microseconds self.half_width = half_width self.deflection_range = deflection_range # the range where half width was calculated # print("avg_wf, spk_height (uv), spk_width (us), wf_ts (ms) added") def get_conditional_spk(self) -> dict: """Get spike timestamps from different contexts""" conditional_spk = {} conditional_spk['U'] = [spk_ts for spk_ts, context in zip(self.spk_ts, self.contexts) if context == 'U'] conditional_spk['D'] = [spk_ts for spk_ts, context in zip(self.spk_ts, self.contexts) if context == 'D'] return conditional_spk def get_correlogram(self, ref_spk_list, target_spk_list, normalize=False) -> dict: """Get auto- or cross-correlogram""" from ..analysis.parameters import spk_corr_parm import math correlogram = {} for social_context in set(self.contexts): # Compute spk correlogram corr_temp = 
np.zeros(len(spk_corr_parm['time_bin'])) for ref_spks, target_spks, context in zip(ref_spk_list, target_spk_list, self.contexts): if context == social_context: for ref_spk in ref_spks: for target_spk in target_spks: diff = target_spk - ref_spk # time difference between two spikes if (diff) and (diff <= spk_corr_parm['lag'] and diff >= -spk_corr_parm['lag']): if diff < 0: ind = np.where(spk_corr_parm['time_bin'] <= -math.ceil(abs(diff)))[0][-1] elif diff > 0: ind = np.where(spk_corr_parm['time_bin'] >= math.ceil(diff))[0][0] # print("diff = {}, bin index = {}".format(diff, spk_corr_parm['time_bin'][ind])) # for debugging corr_temp[ind] += 1 # Make sure the array is symmetrical first_half = np.fliplr([corr_temp[:int((spk_corr_parm['lag'] / spk_corr_parm['bin_size']))]])[0] second_half = corr_temp[int((spk_corr_parm['lag'] / spk_corr_parm['bin_size'])) + 1:] assert np.sum(first_half - second_half) == 0 # Normalize correlogram by the total sum (convert to probability density ) if normalize: corr_temp /= np.sum(correlogram) correlogram[social_context] = corr_temp correlogram['parameter'] = spk_corr_parm # store parameters in the dictionary return correlogram def jitter_spk_ts(self, shuffle_limit, reproducible=True): """ Add a random temporal jitter to the spike Parameters ---------- shuffle_limit : int shuffling limit (in ms) e.g., If set to 5, any integer values between -5 to 5 drawn from uniform distribution will be added to the spike timestamp reproducible : bool make the results reproducible by setting the seed as equal to index """ spk_ts_jittered_list = [] for ind, spk_ts in enumerate(self.spk_ts): np.random.seed() if reproducible: # randomization seed seed = ind np.random.seed(seed) # make random jitter reproducible else: seed = np.random.randint(len(self.spk_ts), size=1) np.random.seed(seed) # make random jitter reproducible nb_spk = spk_ts.shape[0] jitter = np.random.uniform(-shuffle_limit, shuffle_limit, nb_spk) spk_ts_jittered_list.append(spk_ts + jitter) 
self.spk_ts_jittered = spk_ts_jittered_list def get_jittered_corr(self) -> dict: """Get spike correlogram from time-jittered spikes""" from ..analysis.parameters import corr_shuffle from collections import defaultdict correlogram_jitter = defaultdict(list) for iter in range(corr_shuffle['shuffle_iter']): self.jitter_spk_ts(corr_shuffle['shuffle_limit']) corr_temp = self.get_correlogram(self.spk_ts_jittered, self.spk_ts_jittered) # Combine correlogram from two contexts for key, value in corr_temp.items(): if key != 'parameter': try: correlogram_jitter[key].append(value) except: correlogram_jitter[key] = value # Convert to array for key, value in correlogram_jitter.items(): correlogram_jitter[key] = (np.array(value)) return correlogram_jitter def get_isi(self, add_premotor_spk=False): """ Get inter-spike interval Parameters ---------- add_premotor_spk : bool Add spikes from the premotor window for calculation """ isi_dict = {} list_zip = zip(self.onsets, self.offsets, self.spk_ts) if not add_premotor_spk: # Include spikes from the pre-motif buffer for calculation # Pre-motor spikes are included in spk_list by default spk_list = [] for onset, offset, spks in list_zip: onset = np.asarray(list(map(float, onset))) offset = np.asarray(list(map(float, offset))) spk_list.append(spks[np.where((spks >= onset[0]) & (spks <= offset[-1]))]) for context1 in set(self.contexts): if not add_premotor_spk: spk_list_context = [spk_ts for spk_ts, context2 in zip(spk_list, self.contexts) if context2 == context1] else: spk_list_context = [spk_ts for spk_ts, context2 in zip(self.spk_ts, self.contexts) if context2 == context1] isi_dict[context1] = get_isi(spk_list_context) return isi_dict @property def nb_files(self) -> dict: """ Return the number of files per context Returns ------- nb_files : dict Number of files per context ('U', 'D', 'All') """ nb_files = {} nb_files['U'] = len([context for context in self.contexts if context == 'U']) nb_files['D'] = len([context for context in 
self.contexts if context == 'D']) nb_files['All'] = nb_files['U'] + nb_files['D'] return nb_files def nb_bouts(self, song_note: str) -> dict: """ Return the number of bouts per context Parameters ---------- song_note : str song motif syllables Returns ------- nb_bouts : dict """ from ..analysis.functions import get_nb_bouts nb_bouts = {} syllable_list = [syllable for syllable, context in zip(self.syllables, self.contexts) if context == 'U'] syllables = ''.join(syllable_list) nb_bouts['U'] = get_nb_bouts(song_note, syllables) syllable_list = [syllable for syllable, context in zip(self.syllables, self.contexts) if context == 'D'] syllables = ''.join(syllable_list) nb_bouts['D'] = get_nb_bouts(song_note, syllables) nb_bouts['All'] = nb_bouts['U'] + nb_bouts['D'] return nb_bouts def nb_motifs(self, motif: str) -> dict: """ Return the number of motifs per context Parameters ---------- motf : str Song motif (e.g., 'abcd') Returns ------- nb_motifs : dict """ from ..utils.functions import find_str nb_motifs = {} syllable_list = [syllable for syllable, context in zip(self.syllables, self.contexts) if context == 'U'] syllables = ''.join(syllable_list) nb_motifs['U'] = len(find_str(syllables, motif)) syllable_list = [syllable for syllable, context in zip(self.syllables, self.contexts) if context == 'D'] syllables = ''.join(syllable_list) nb_motifs['D'] = len(find_str(syllables, motif)) nb_motifs['All'] = nb_motifs['U'] + nb_motifs['D'] return nb_motifs def get_note_info(self, target_note, pre_buffer=0, post_buffer=0 ): """ Obtain a class object (NoteInfo) for individual note spikes will be collected from note onset (+- pre_buffer) to offset (+- post_buffer) Parameters ---------- target_note : str Get information from this note pre_buffer : int Amount of time buffer relative to the event onset (e.g., syllable onset) post_buffer : int Amount of time buffer relative to the event offset (e.g., syllable onset) Returns ------- NoteInfo : class object """ from ..utils.functions 
import find_str syllables = ''.join(self.syllables) onsets = np.hstack(self.onsets) offsets = np.hstack(self.offsets) durations = np.hstack(self.durations) contexts = '' for i in range(len(self.contexts)): # concatenate contexts contexts += self.contexts[i] * len(self.syllables[i]) ind = np.array(find_str(syllables, target_note)) # get note indices if not ind.any(): # skil if the note does not exist return note_onsets = np.asarray(list(map(float, onsets[ind]))) note_offsets = np.asarray(list(map(float, offsets[ind]))) note_durations = np.asarray(list(map(float, durations[ind]))) note_contexts = ''.join(np.asarray(list(contexts))[ind]) # Get the note that immeidately follows next_notes = '' for i in ind: next_notes += syllables[i + 1] # Get spike info spk_ts = np.hstack(self.spk_ts) note_spk_ts_list = [] for onset, offset in zip(note_onsets, note_offsets): note_spk_ts_list.append( spk_ts[np.where((spk_ts >= onset - pre_buffer) & (spk_ts <= offset + post_buffer))]) # Organize data into a dictionary note_info = { 'note': target_note, 'next_notes' : next_notes, 'onsets': note_onsets, 'offsets': note_offsets, 'durations': note_durations, 'contexts': note_contexts, 'median_dur': np.median(note_durations, axis=0), 'spk_ts': note_spk_ts_list, 'path': self.path, # directory where the data exists 'pre_buffer' : pre_buffer, 'post_buffer' : post_buffer } return NoteInfo(note_info) # return note info @property def open_folder(self) -> None: """Open the data folder""" from ..utils.functions import open_folder open_folder(self.path) class NoteInfo: """ Class for storing information about a single note syllable and its associated spikes """ def __init__(self, note_dict): # Set the dictionary values to class attributes for key in note_dict: setattr(self, key, note_dict[key]) # Perform PLW (piecewise linear warping) self.spk_ts_warp = self._piecewise_linear_warping() def __repr__(self): return str([key for key in self.__dict__.keys()]) def select_index(self, index) -> None: """ 
Select only the notes with the matching index Parameters ---------- index : np.array or list Note indices to keep """ if isinstance(index, list): index = np.array(index) self.contexts = ''.join(np.array(list(self.contexts))[index]) self.onsets, self.offsets, self.durations, self.spk_ts, self.spk_ts_warp \ = self.onsets[index], self.offsets[index], self.durations[index], self.spk_ts[index], self.spk_ts_warp[index] def select_context(self, target_context : str, keep_median_duration=True ) -> None: """ Select one context Parameters ---------- target_context : str 'U' or 'D' keep_median_duration : bool Normally medial note duration is calculated using all syllables regardless of the context one may prefer to use this median to reduce variability when calculating pcc if set False, new median duration will be calculated using the selected notes """ zipped_list = \ list(zip(self.contexts, self.next_notes,
= district_data[i][5] # Year of construction curr_mod_year = district_data[i][ 6] # optional (last year of modernization) curr_th_e_demand = district_data[i][ 7] # optional: Final thermal energy demand in kWh # For residential buildings: Space heating only! # For non-residential buildings: Space heating AND hot water! (SLP) curr_el_e_demand = district_data[i][ 8] # optional (Annual el. energy demand in kWh) curr_pv_roof_area = district_data[i][ 9] # optional (Usable pv roof area in m2) curr_nb_of_apartments = district_data[i][ 10] # optional (Number of apartments) curr_nb_of_occupants = district_data[i][ 11] # optional (Total number of occupants) curr_nb_of_floors = district_data[i][ 12] # optional (Number of floors above the ground) curr_avg_height_of_floors = district_data[i][ 13] # optional (Average Height of floors) curr_central_ahu = district_data[i][ 14] # optional (If building has a central air handling unit (AHU) or not (boolean)) curr_res_layout = district_data[i][ 15] # optional Residential layout (int, optional, e.g. 0 for compact) curr_nb_of_neighbour_bld = district_data[i][ 16] # optional Neighbour Buildings (int, optional) curr_type_attic = district_data[i][ 17] # optional Type of attic (int, optional, e.g. 0 for flat roof); # 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated; curr_type_cellar = district_data[i][ 18] # optional Type of basement # (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated; curr_dormer = district_data[i][ 19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer) curr_construction_type = district_data[i][ 20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light) curr_method_3_nb = district_data[i][ 21] # optional Method_3_nb (for usage of measured, weekly non-res. el. profile curr_method_4_nb = district_data[i][ 22] # optional Method_4_nb (for usage of measured, annual non-res. el. 
profile else: # Single entry # Extract data out of input file curr_id = int(district_data[0]) # id / primary key of building curr_x = district_data[1] # x-coordinate in m curr_y = district_data[2] # y-coordinate in m curr_build_type = int( district_data[3]) # building type nb (int) curr_nfa = district_data[4] # Net floor area in m2 curr_build_year = district_data[5] # Year of construction curr_mod_year = district_data[ 6] # optional (last year of modernization) curr_th_e_demand = district_data[ 7] # optional: Final thermal energy demand in kWh # For residential buildings: Space heating only! # For non-residential buildings: Space heating AND hot water! (SLP) curr_el_e_demand = district_data[ 8] # optional (Annual el. energy demand in kWh) curr_pv_roof_area = district_data[ 9] # optional (Usable pv roof area in m2) curr_nb_of_apartments = district_data[ 10] # optional (Number of apartments) curr_nb_of_occupants = district_data[ 11] # optional (Total number of occupants) curr_nb_of_floors = district_data[ 12] # optional (Number of floors above the ground) curr_avg_height_of_floors = district_data[ 13] # optional (Average Height of floors) curr_central_ahu = district_data[ 14] # optional (If building has a central air handling unit (AHU) or not (boolean)) curr_res_layout = district_data[ 15] # optional Residential layout (int, optional, e.g. 0 for compact) curr_nb_of_neighbour_bld = district_data[ 16] # optional Neighbour Buildings (int, optional) curr_type_attic = district_data[ 17] # optional Type of attic (int, optional, e.g. 0 for flat roof); # 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated; curr_type_cellar = district_data[ 18] # optional Type of basement # (int, optional, e.g. 
1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated; curr_dormer = district_data[ 19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer) curr_construction_type = district_data[ 20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light) curr_method_3_nb = district_data[ 21] # optional Method_3_nb (for usage of measured, weekly non-res. el. profile curr_method_4_nb = district_data[ 22] # optional Method_4_nb (for usage of measured, annual non-res. el. profile print('Process building', curr_id) print('########################################################') # Assert functions # ############################################################ assert curr_build_type >= 0 assert curr_nfa > 0 for m in range(5, 9): if multi_data: if district_data[i][m] is not None: assert district_data[i][m] > 0 else: if district_data[m] is not None: assert district_data[m] > 0 if curr_nb_of_apartments is not None: assert curr_nb_of_apartments > 0 # Convert to int curr_nb_of_apartments = int(curr_nb_of_apartments) if curr_nb_of_occupants is not None: assert curr_nb_of_occupants > 0 # Convert curr_nb_of_occupants from float to int curr_nb_of_occupants = int(curr_nb_of_occupants) if (curr_nb_of_occupants is not None and curr_nb_of_apartments is not None): assert curr_nb_of_occupants / curr_nb_of_apartments <= 5, ( 'Average share of occupants per apartment should ' + 'not exceed 5 persons! (Necessary for stochastic, el.' + 'profile generation.)') if curr_method_3_nb is not None: curr_method_3_nb >= 0 if curr_method_4_nb is not None: curr_method_4_nb >= 0 if curr_build_type == 0 and curr_nb_of_apartments is None: # pragma: no cover # Define single apartment, if nb of apartments is unknown msg = 'Building ' + str(curr_id) + ' is residential, but' \ ' does not have a number' \ ' of apartments. Going' \ ' to set nb. to 1.' 
warnings.warn(msg) curr_nb_of_apartments = 1 if (curr_build_type == 0 and curr_nb_of_occupants is None and use_dhw and dhw_method == 2): raise AssertionError('DHW profile cannot be generated' + 'for residential building without' + 'occupants (stochastic mode).' + 'Please check your input file ' + '(missing number of occupants) ' + 'or disable dhw generation.') # Check if TEASER inputs are defined if call_teaser or th_gen_method == 3: if curr_build_type == 0: # Residential assert curr_nb_of_floors is not None assert curr_avg_height_of_floors is not None assert curr_central_ahu is not None assert curr_res_layout is not None assert curr_nb_of_neighbour_bld is not None assert curr_type_attic is not None assert curr_type_cellar is not None assert curr_dormer is not None assert curr_construction_type is not None if curr_nb_of_floors is not None: assert curr_nb_of_floors > 0 if curr_avg_height_of_floors is not None: assert curr_avg_height_of_floors > 0 if curr_central_ahu is not None: assert 0 <= curr_central_ahu <= 1 if curr_res_layout is not None: assert 0 <= curr_res_layout <= 1 if curr_nb_of_neighbour_bld is not None: assert 0 <= curr_nb_of_neighbour_bld <= 2 if curr_type_attic is not None: assert 0 <= curr_type_attic <= 3 if curr_type_cellar is not None: assert 0 <= curr_type_cellar <= 3 if curr_dormer is not None: assert 0 <= curr_dormer <= 1 if curr_construction_type is not None: assert 0 <= curr_construction_type <= 1 # Check building type (residential or non residential) # #------------------------------------------------------------- if curr_build_type == 0: # Is residential print('Residential building') # Get spec. net therm. 
demand value according to last year # of modernization or build_year # If year of modernization is defined, use curr_mod_year if curr_mod_year is not None: use_year = int(curr_mod_year) else: # Use year of construction use_year = int(curr_build_year) # Get specific, thermal energy demand (based on use_year) for j in range(len(start_year_column)): if use_year >= start_year_column[j]: curr_spec_th_demand = spec_th_dem_res_building[len( spec_th_dem_res_building) - 1 - j][2] break # # Get spec. electr. demand # if curr_nb_of_occupants is None: # # USE AGEB values, if no number of occupants is given # # Set specific demand value in kWh/m2*a # curr_spec_el_demand = spec_el_dem_res_building[1] # # Only valid for array like [2012 38.7] # else: # # Use Stromspiegel 2017 values # # Calculate specific electric demand values depending # # on number of occupants # # if curr_nb_of_apartments == 1: # btype = 'sfh' # elif curr_nb_of_apartments > 1: # btype = 'mfh' # # # Average occupancy number per apartment # curr_av_occ_per_app = \ # curr_nb_of_occupants / curr_nb_of_apartments # print('Average number of occupants per apartment') # print(round(curr_av_occ_per_app, ndigits=2)) # # if curr_av_occ_per_app <= 5 and curr_av_occ_per_app > 0: # # Correctur factor for non-int. av. number of # # occupants (#19) # # # Divide annual el. energy demand with net floor area # if btype == 'sfh': # row_idx_low = math.ceil(curr_av_occ_per_app) - 1 # row_idx_high = math.floor(curr_av_occ_per_app) - 1 # elif btype == 'mfh': # row_idx_low = math.ceil(curr_av_occ_per_app) - 1 \ # + 5 # row_idx_high = math.floor(curr_av_occ_per_app) - 1 \ # + 5 # #
an int' assert type(exclude) == list, 'Excluded cards must be in a list' assert type(include) == list, 'Included cards must be in a list' cards = copy.deepcopy(self._stack) indexes = [] for i in range(len(cards)): if cards[i] in exclude: indexes.append(i) c = 0 for i in indexes: del cards[i-c] c += 1 for c in include: cards.addCard(c) cards.sortBySuit() groups = [] i = 0 while i < len(cards) - n + 1: flag = True g = [] for j in range(i, i + n - 1): if cards[j].getRank() == cards[j+1].getRank() - 1 and cards[j].getSuit() == cards[j+1].getSuit(): g.append(cards[j]) if j == i+n-2: g.append(cards[j+1]) else: flag = False break if flag: i += n groups.append(g) else: i += 1 return groups # Returns a list of distincs n-sized books in the player's stack # Books are found after removing cards present in the exclude list and adding cards present in the include list def getBooks(self, n, exclude, include): assert type(n) == int, 'Size of books must be an int' assert type(exclude) == list, 'Excluded cards must be in a list' assert type(include) == list, 'Included cards must be in a list' cards = copy.deepcopy(self._stack) indexes = [] for i in range(len(cards)): if cards[i] in exclude: indexes.append(i) c = 0 for i in indexes: del cards[i-c] c += 1 for c in include: cards.addCard(c) cards.sortByRank() groups = [] i = 0 while i < len(cards) - n + 1: g = [] flag = True for j in range(i, i+n-1): if cards[j].getRank() == cards[j+1].getRank(): g.append(cards[j]) if j == i+n-2: g.append(cards[j+1]) else: flag = False break if flag: i += n groups.append(g) else: i += 1 return groups # Adds a card to player's stack def addCard(self, card): self._stack.addCard(card) # Returns a card in player's stack at the given index def getCard(self, i): assert type(i) == int, 'Card index must be an int' return copy.deepcopy(self._stack[i]) # Removes the card in player's stack at the given index def removeCard(self, i): assert type(i) == int, 'Card index must be an int' del self._stack[i] # Returns 
the number of cards the player currently possess def getCurrentCardsNum(self): return len(self._stack) # Checks if the computer player should choose the given card from the pile or choose a card from the deck # Returns a boolean value where True denotes choosing the given pile card def chooseCard(self, pileCard): assert self._comp == True, 'Only the computer player can choose a card' start4Books = len(self.getBooks(4, [], [])) end4Books = len(self.getBooks(4, [], [pileCard])) start4Runs = len(self.getRuns(4, [], [])) end4Runs = len(self.getRuns(4, [], [pileCard])) start3Books = len(self.getBooks(3, [], [])) end3Books = len(self.getBooks(3, [], [pileCard])) start3Runs = len(self.getRuns(3, [], [])) end3Runs = len(self.getRuns(3, [], [pileCard])) start2Runs = len(self.getRuns(2, [], [])) end2Runs = len(self.getRuns(2, [], [pileCard])) start2Books = len(self.getBooks(2, [], [])) end2Books = len(self.getBooks(2, [], [pileCard])) if self.hasWon([], [pileCard]): return True fourRunsUtility = 0 if start4Runs == 0 and end4Runs > 0: fourRunsUtility = 2 fourBooksUtility = 0 if start4Books == 0 and end4Books > 0 and start4Runs == 0 and end4Runs == 0: fourBooksUtility = 4 runsUtility = 0 if start3Runs < 2: runsUtility = (end3Runs - start3Runs) * 2 * (2 - start4Runs) booksUtility = 0 if end3Books > start3Books: if start4Books == 0: booksUtility = 2 elif start3Books == 0: booksUtility = 1 if start4Books == 0 and start3Books == 0 and start2Books < end2Books: booksUtility += 1 if start4Books == 0 and start2Books < end2Books: booksUtility += 0.5 if start4Runs == 0 and start3Runs == 0 and start2Runs < end2Runs: runsUtility += 1 if start4Runs == 0 and start2Runs < end2Runs: runsUtility += 0.5 return booksUtility + fourBooksUtility + runsUtility + fourRunsUtility > 2 def _getCardUtil(self, card): assert self._comp == True, 'Only the computer player can choose a card' if self.hasWon([], []) and not self.hasWon([card], []): return 100 start4Books = len(self.getBooks(4, [], [])) 
end4Books = len(self.getBooks(4, [card], [])) start4Runs = len(self.getRuns(4, [], [])) end4Runs = len(self.getRuns(4, [card], [])) start3Books = len(self.getBooks(3, [], [])) end3Books = len(self.getBooks(3, [card], [])) start3Runs = len(self.getRuns(3, [], [])) end3Runs = len(self.getRuns(3, [card], [])) start2Runs = len(self.getRuns(2, [], [])) end2Runs = len(self.getRuns(2, [card], [])) start2Books = len(self.getBooks(2, [], [])) end2Books = len(self.getBooks(2, [card], [])) fourBooksUtility = 0 if start4Books == 1 and end4Books == 0: fourBooksUtility = 5 elif start4Books < end4Books: fourBooksUtility = 1 fourRunsUtility = 0 if start4Runs == 0 and end4Runs > 0: fourRunsUtility = 4 runsUtility = 0 if end3Runs < 2 and end3Runs < start3Runs: runsUtility = (start3Runs - end3Runs) * 2 elif end3Runs < start3Runs: runsUtility = 1 booksUtility = 0 if end3Books < start3Books: if start4Books == 0: booksUtility = 2 elif end3Books == 1: booksUtility = 1 if start4Runs == 0 and start3Runs == 0 and start2Runs < end2Runs: runsUtility += 1 if start4Runs == 0 and start2Runs < end2Runs: runsUtility += 0.5 if start4Books == 0 and start3Books == 0 and start2Books < end2Books: booksUtility += 1 if start4Books == 0 and start2Books < end2Books: booksUtility += 0.5 return booksUtility + runsUtility + fourBooksUtility # Returns the index of the card which the computer player should remove to maximize probabilty of winning # Removes the card with the minimum utility def cardToRemove(self): minUtil, minUtilIndex = -1, -1 for i in range(len(self._stack)): if i == 0: minUtil = self._getCardUtil(self._stack[i]) minUtilIndex = i else: util = self._getCardUtil(self._stack[i]) if util <= minUtil: minUtil = util minUtilIndex = i return minUtilIndex # Checks if the player has won, returns boolean def hasWon(self, exclude, include): fourBooks = self.getBooks(4, exclude, include) fourRuns = self.getRuns(4, exclude, include) if len(fourBooks) >= 1: allBooks = fourBooks[0] runs = self.getRuns(3, 
allBooks + exclude, include) books = self.getBooks(3, allBooks + exclude, include) booksWithoutRuns = self.getBooks(3, allBooks+flattenList(runs)+exclude, include) runsWithoutBooks = self.getRuns(3, allBooks+flattenList(books)+exclude, include) if len(runs) >= 2 or (len(books) == 1 and len(runsWithoutBooks) == 1) or (len(runs) == 1 and len(booksWithoutRuns) == 1): return True elif len(fourBooks) == 2: allBooks = fourBooks[1] runs = self.getRuns(3, allBooks, include) books = self.getBooks(3, allBooks, include) booksWithoutRuns = self.getBooks(3, allBooks+flattenList(runs)+exclude, include) runsWithoutBooks = self.getRuns(3, allBooks+flattenList(books)+exclude, include) if len(runs) >= 2 or (len(books) == 1 and len(runsWithoutBooks) == 1) or (len(runs) == 1 and len(booksWithoutRuns) == 1): return True else: return False else: return False elif len(fourRuns) >= 1: allRuns = fourRuns[0] runs = self.getRuns(3, allRuns + exclude, include) books = self.getBooks(3, allRuns + exclude, include) booksWithoutRuns = self.getBooks(3, allRuns+flattenList(runs)+exclude, include) runsWithoutBooks = self.getRuns(3, allRuns+flattenList(books)+exclude, include) if len(runs) >= 2 or (len(books) == 1 and len(runsWithoutBooks) == 1) or (len(runs) == 1 and len(booksWithoutRuns) == 1): return True elif len(fourRuns) == 2: allRuns = fourRuns[1] runs = self.getRuns(3, allRuns, include) books = self.getBooks(3, allRuns, include) booksWithoutRuns = self.getBooks(3, allRuns+flattenList(runs)+exclude, include) runsWithoutBooks = self.getRuns(3, allRuns+flattenList(books)+exclude, include) if len(runs) >= 2 or (len(books) == 1 and len(runsWithoutBooks) == 1) or (len(runs) == 1 and len(booksWithoutRuns) == 1): return True else: return False else: return False else: return False class Game: def __init__(self, eventHandler): global images, conf, fonts, colors self._eventHandler = eventHandler self.setInitialState() pygame.init() self.screen = pygame.display.set_mode((conf['screenWidth'], 
conf['screenHeight'])) pygame.display.set_caption('Indian Rummy') self.screen.fill(colors['green']) pygame.mouse.set_cursor(*pygame.cursors.broken_x) fonts['heading'] = pygame.font.Font(os.path.join('assets', 'AvenirNext.ttc'), 38) fonts['player'] = pygame.font.Font(os.path.join('assets', 'AvenirNext.ttc'), 24) fonts['hero'] = pygame.font.Font(os.path.join('assets', 'AvenirNext.ttc'), 80) music['bg'] = pygame.mixer.Sound(os.path.join('music', 'bg.ogg')) music['won'] = pygame.mixer.Sound(os.path.join('music', 'won.ogg')) music['lost'] = pygame.mixer.Sound(os.path.join('music', 'lost.ogg')) music['stack'] = pygame.mixer.Sound(os.path.join('music', 'stack.ogg')) music['card'] = pygame.mixer.Sound(os.path.join('music', 'card.ogg')) music['start'] = pygame.mixer.Sound(os.path.join('music', 'start.ogg')) for suit in Card.SUITS: for i in range(13): name = str(i+1) + suit images[name] = pygame.image.load( os.path.join('cards', name+'.png') ) images[name].convert_alpha() images[name] = pygame.transform.scale(images[name], (conf['imageWidth'], conf['imageHeight'])) images['back'] = pygame.image.load( os.path.join('cards', 'back.png') ) images['back'].convert_alpha() images['back'] = pygame.transform.scale(images['back'], (conf['imageWidth'], conf['imageHeight'])) images['none'] = pygame.image.load( os.path.join('cards', 'none.png') ) images['none'].convert_alpha() images['none'] = pygame.transform.scale(images['none'], (conf['imageWidth'], conf['imageHeight'])) def setInitialState(self): self._delayCount = 0 self._deck = CardStack() self._eventHandler.removeAllElems() self._swapSelect = -1 self._leader = None self._textBox = None for i in range(52): suit = 'H' if i % 4 == 1: suit = 'D' elif i % 4 == 2: suit = 'S' elif i % 4 == 3: suit = 'C' self._deck.addCard(Card(i//4 + 1, suit)) self._deck.shuffle() self._pile = CardStack() self._state = 'start' player1stack = CardStack() player2stack = CardStack() for i in range(Player.CARDS_NUM): 
player1stack.addCard(copy.deepcopy(self._deck[0])) del self._deck[0] player2stack.addCard(copy.deepcopy(self._deck[0])) del self._deck[0] self._player1 = Player(player1stack) self._player2 = Player(player2stack, True) self._eventHandler.addElem('start-btn', conf['screenWidth']//2 - 200, conf['screenHeight'] - 200, 180, 60) self._eventHandler.addElem('leader-btn', conf['screenWidth']//2 + 20, conf['screenHeight'] - 200, 180, 60) def getName(self): self._state = 'nameInput' self._eventHandler.removeAllElems() self._textBox = pygame_textinput.TextInput(font_family = os.path.join('assets', 'AvenirNext.ttc'), font_size = 30, text_color=colors['darkGreen'], cursor_color=colors['darkGreen']) def start(self, userName): global conf, music self._eventHandler.removeAllElems() self._state = 'player1stack' deckX = conf['screenWidth'] - conf['imageWidth'] - 126 pileX = 44 y = 290 self._textBox = None self._leader = LeaderRecord(userName) music['start'].play(0) self._leader.addMove() self._eventHandler.addElem('deck', deckX, y, conf['imageWidth'] + 90, conf['imageHeight']+66) self._eventHandler.addElem('pile', pileX, y, conf['imageWidth'] + 220, conf['imageHeight']) def over(self, player): assert player == 1 or player == 2, 'Invalid player number' self._swapSelect = -1 self._eventHandler.removeAllElems() self._eventHandler.addElem('restart-btn', conf['screenWidth']//2 - 180, 440, 160, 60) self._eventHandler.addElem('self-analysis-btn', conf['screenWidth']//2 + 20, 440, 160, 60) self._state = 'player'+str(player)+'win' if player == 1: music['won'].play(0) self._leader.saveToFile(True) else: music['lost'].play(0) self._leader.saveToFile(False) def showLeaderBoard(self): self._state = 'leaderboard' self._eventHandler.removeAllElems() self._eventHandler.addElem('home-btn', conf['screenWidth']//2 - 90, conf['screenHeight'] - 120, 180, 60) self._leader = getLeaderBoard() col4x, y = 4 * conf['screenWidth']//5, 175 for i in range(len(self._leader)): 
eventHandler.addElem('analysis-'+str(i), col4x - 70, y + 65*i + 30 , 140, 50) def update(self): if len(self._deck) == 0: self._deck = self._pile self._pile = CardStack() self._deck.shuffle() if self._state == 'player2stack' and self._delayCount == 7: if len(self._pile) != 0: addFromPile = self._player2.chooseCard(copy.deepcopy(self._pile[len(self._pile) - 1])) if addFromPile: self._player2.addCard(copy.deepcopy(self._pile[len(self._pile) - 1])) del self._pile[len(self._pile) - 1] else: self._player2.addCard(copy.deepcopy(self._deck[len(self._deck) - 1])) del self._deck[len(self._deck) - 1] else: self._player2.addCard(copy.deepcopy(self._deck[len(self._deck) - 1])) del self._deck[len(self._deck) - 1] removeIndex = self._player2.cardToRemove() removed = self._player2.getCard(removeIndex) self._pile.addCard(removed) self._player2.removeCard(removeIndex) self._state = 'player1stack' self._delayCount = 0 if self._player2.hasWon([], []): self.over(2) return self._leader.addMove() return elif self._state == 'player2stack': self._delayCount += 1 elif self._state == 'nameInput': if self._textBox.update(pygame.event.get()): name = self._textBox.get_text().strip() if len(name) > 0: self.start(name) for e in pygame.event.get(): if e.type == pygame.QUIT: game.close() if e.type == pygame.MOUSEBUTTONDOWN: x, y = pygame.mouse.get_pos() elem = self._eventHandler.getClick(x, y) if self._state == 'player1stack': if elem == 'deck' and len(self._deck) > 0: self._player1.addCard(copy.deepcopy(self._deck[len(self._deck) - 1])) del self._deck[len(self._deck) - 1] self._state = 'player1card' self._swapSelect = -1 self._leader.setMoveStack('deck') music['stack'].play(0) break elif elem == 'pile' and len(self._pile) > 1: self._player1.addCard(copy.deepcopy(self._pile[len(self._pile) - 1])) del self._pile[len(self._pile) - 1] self._state = 'player1card' self._leader.setMoveStack('pile') music['stack'].play(0) self._swapSelect = -1 break elif self._state ==
<reponame>unitedstates/inspectors-general<filename>inspectors/peacecorps.py #!/usr/bin/env python import datetime import logging import os import urllib from utils import utils, inspector, admin # https://www.peacecorps.gov/about/inspector-general/ archive = 1989 # options: # standard since/year options for a year range to fetch from. # # Notes for IG's web team: # REPORTS_URL = "https://www.peacecorps.gov/about/inspector-general/reports/?page=%d" REPORT_PUBLISHED_MAPPING = { "Case_Study_-_PC_Philippines_-_2006": datetime.datetime(2006, 9, 1), "Case_Study_-_PC_Panama_-_2006": datetime.datetime(2006, 9, 1), "Case_Study_-_PC_Paraguay_-_2006": datetime.datetime(2006, 9, 1), "Case_Study_-_The_Counterparts_Perspective": datetime.datetime(2006, 9, 1), "Case_Study_-_PC_Ukraine_-_2006": datetime.datetime(2006, 9, 1), "Case_Study_-_PC_Georgia_-_2006": datetime.datetime(2006, 9, 1), "Case_Study_-_PC_Honduras_-_2006": datetime.datetime(2006, 9, 1), "Case_Study_-_PC_Malawi_-_2006": datetime.datetime(2006, 9, 1), "Case_Study_-_PC_Mauritania_-_2006": datetime.datetime(2006, 9, 1), "Case_Study_-_PC_Niger_-_2006": datetime.datetime(2006, 9, 1), "Death_Inquiry_and_Assessment_of_Medical_Care_in_Peace_Corps_Morocco": datetime.datetime(2010, 2, 1), "Management_and_Performance_Challenges_2012": datetime.datetime(2012, 10, 26), "SARC_20130331": datetime.datetime(2013, 5, 22), "PCIG_SARC_20130930": datetime.datetime(2013, 12, 5), "Burkina_Faso_Medical_Supply_Management_Advisory_Report": datetime.datetime(2013, 3, 14), "PCIG_Final_MAR_Certification_of_Volunteer_Payments": datetime.datetime(2013, 9, 24), "MAR_Cost_Savings_Opportunity_on_Value_Added_Tax": datetime.datetime(2013, 2, 13), "Management_Advisory_Report-Peace_Corps_Drug_Free_Workplace_Program": datetime.datetime(2012, 8, 16), "PCIG_2014_Peace_Corps_OIG_Peer_Review_Final": datetime.datetime(2014, 3, 27), "MAR_Sierra_Leone": datetime.datetime(2013, 3, 14), "Capstone_Report_2012_Medical_Inventory_Issues_Final": 
datetime.datetime(2013, 8, 26), "PCIG_Capstone_Report_Billing_and_Collection_Process": datetime.datetime(2014, 9, 30), "PCIG_SARC_201400930": datetime.datetime(2014, 11, 19), "PC_Morocco_Assessment_of_Medical_Care": datetime.datetime(2010, 2, 1), "PCIG_New_Country_Entries_Lessons_Learned_Final_Report": datetime.datetime(2014, 9, 30), "PC_Recurring_Issues_OIG_Post_Audits_Evaluations_FYs_2009-2011": datetime.datetime(2012, 4, 1), "PC_Vanuatu_SR_Advice_and_Assistance": datetime.datetime(2010, 5, 12), "PC_Gambia_SR_Grant_Activities": datetime.datetime(2010, 5, 14), "PC_Ecuador_Special_Review_IG1005SR": datetime.datetime(2010, 9, 1), "PCIG_Agency_Policies_Related_to_Volunteer_Sexual_Assault_Allegations": datetime.datetime(2014, 11, 21), "PCIG_Investigative_Review_of_a_Volunteer_Death_in_Peace_Corps_China": datetime.datetime(2014, 11, 1), "PCIG_Agency_Response_to_the_China_Investigative_Review_Nov_2014": datetime.datetime(2015, 1, 23), "PCIG_MAR_Peace_Corps_Cloud_Computing_Pilot_Program": datetime.datetime(2015, 3, 17), "MAR_Peace_Corps_Volunteer_Health_Care_Administration_Contract": datetime.datetime(2015, 3, 31), "Management_Implication_Report_Peace_Corps_Paraguays_Inappropriate_Use_of_Cooperative_Agreements_to_Obligate_the_Government": datetime.datetime(2010, 3, 15), "MAR_Mitigating_a_Potential_Electrica_Safety_Hazard_Redacted_2": datetime.datetime(2011, 5, 17), "Safety_and_security_weaknesses_in PC_Cameroon": datetime.datetime(2012, 7, 31), "Peace_Corps_Gambia_Grant_Activities": datetime.datetime(2010, 5, 14), "OIG_Investigations_have_Disclosed_Improper_Vehicle_Disposal_Practices_and_Vehicle_Sales_that_do_not_generate_Fair_Market_Returns": datetime.datetime(2010, 3, 30), "Management_and_Performance_Challenges_2014": datetime.datetime(2014, 11, 3), "management-performance-challenges-fy2015": datetime.datetime(2015, 12, 3), "Healthcare_Benefits_Administration_Contract_Audit": datetime.datetime(2016, 1, 21), "Kyrgyz_Republic_Audit_Final_Report": datetime.datetime(2016, 
1, 15), "PCIG_Cameroon_Final_Audit_Report": datetime.datetime(2015, 1, 14), "PCIG_Final_Follow-up_Audit_Report_of_the_Peace_Corps_Safety_and_Security_Program": datetime.datetime(2015, 3, 12), "Final_Audit_Report_Guyana-2015": datetime.datetime(2015, 8, 5), "PCIG_Nepal_Final_Audit": datetime.datetime(2015, 2, 5), "Madagascar_Final_Audit": datetime.datetime(2015, 4, 30), "Vanuatu_Audit_Report_Final": datetime.datetime(2015, 9, 29), "PCIG_Armenia_Final_Audit_Report": datetime.datetime(2014, 2, 20), "PCIG_SARC_20140331": datetime.datetime(2014, 5, 22), "PCIG_Dominican_Republic_Final_Audit_Report": datetime.datetime(2014, 9, 30), "PCIG_The_Gambia_Final_Audit_Report": datetime.datetime(2014, 9, 15), "Management_and_Performance_Challenges_2013": datetime.datetime(2013, 11, 27), "PCIG_Macedonia_Final_Audit_Report": datetime.datetime(2013, 12, 3), "Memo_Increased_Use_of_Premium_Class_Travel_IG-13-0000": datetime.datetime(2013, 12, 19), "PCIG_Final_Audit_Report_Applicant_Screening_Process": datetime.datetime(2014, 6, 10), "PCIG_Final_Audit_Report_Peace_Corps_Overseas_Staffing": datetime.datetime(2013, 11, 21), "PCIG_Final_Report_on_the_Review_of_PC_Management_of_Grants": datetime.datetime(2013, 3, 28), "PCIG_Jamaica_Final_Audit_Report": datetime.datetime(2013, 7, 3), "PC_Malawi_Final_Audit_Report_IG1302A": datetime.datetime(2013, 2, 27), "PC_Final_Audit_Report_The_Peace_Corps_50th_Anniversary_Program_IG1301A": datetime.datetime(2012, 10, 25), "PCIG_South_Africa_Final_Audit_Report": datetime.datetime(2013, 3, 18), "PCIG_Zambia_Final_Audit_Report": datetime.datetime(2013, 9, 27), "PC_Costa_Rica_Final_Audit_Report_IG1203A": datetime.datetime(2012, 3, 9), "PC_Final_Audit_Report_Jordan_IG1207": datetime.datetime(2012, 9, 25), "PC_Lesotho_Final_Audit_Report_IG1205A": datetime.datetime(2012, 6, 29), "PC_Limited_Scope_Audit_of_Peace_Corps_China": datetime.datetime(2012, 8, 1), "PC_Mali_Final_Audit_Report_IG1204A": datetime.datetime(2012, 3, 22), 
"PC_Final_Audit_Report_Mid-Atlantic_RRO_IG1201A": datetime.datetime(2011, 10, 11), "PC_Final_Audit_Report_Budget_Formulation_Process_IG1202A": datetime.datetime(2012, 2, 14), "PC_Final_Audit_Report-PC-Tonga-IG1208A": datetime.datetime(2012, 9, 28), "PC_Albania_Final_Audit_Report_IG1107A": datetime.datetime(2011, 6, 21), "PC_Belize_Final_Audit_Report_IG1104A": datetime.datetime(2011, 3, 1), "PC_Ethiopia_Final_Audit_Report_IG1102A": datetime.datetime(2011, 2, 1), "PC_Mexico_Final_Audit_Report_IG1101A": datetime.datetime(2011, 2, 9), "PC_Mozambique_Final_Audit_Report_IG1105A": datetime.datetime(2011, 3, 31), "PC_Panama_Final_Audit_Report_IG1109A": datetime.datetime(2011, 9, 15), "PC_Rwanda_Final_Audit_Report_IG1108A": datetime.datetime(2011, 9, 12), "PC_Togo_Final_Audit_Report_IG1103A": datetime.datetime(2011, 3, 3), "Memo_Data_Center_AC_Failure_IG-10-0000": datetime.datetime(2010, 6, 3), "Management_and_Performance_Challenges_2010": datetime.datetime(2010, 10, 12), "OIG_Role_in_responding_to_the_Death_of_a_Volunteer": datetime.datetime(2010, 11, 5), "PC_Ukraine_Final_Audit_Report_IG1106A": datetime.datetime(2011, 3, 31), "PC_Burkina_Faso_Final_Audit_Report_IG1001A": datetime.datetime(2009, 10, 1), "PC_Cape_Verde_Final_Audit_Report_IG1003A": datetime.datetime(2009, 12, 1), "PC_Kenya_Final_Audit_Report_IG1012A": datetime.datetime(2010, 9, 1), "PC_Moldova_Final_Audit_Report_IG1011A": datetime.datetime(2010, 8, 1), "PC_Mongolia_Final_Audit_Report_IG1007A": datetime.datetime(2010, 2, 1), "PC_OCIO_Final_Audit_Report_IG1005A": datetime.datetime(2010, 1, 1), "PC_Paraguay_Final_Audit_Report_IG1010A": datetime.datetime(2010, 8, 1), "PC_Process_for_Soliciting_Awarding_and_Administering_Contracts_IG1006A": datetime.datetime(2010, 3, 1), "PC_Suriname_Final_Audit_Report_IG1006A": datetime.datetime(2010, 5, 1), "PC_Tanzania_Final_Audit_Report_IG1004A": datetime.datetime(2010, 1, 1), "MIR_Unnecessary_Use_of_Social_Security_Numbers_of_Agency_Forms_IG-09-0000": datetime.datetime(2009, 
3, 9), "MAR_Benin_Security_Concerns_IG-09-0000": datetime.datetime(2009, 5, 29), "PCMorocco_0910A": datetime.datetime(2009, 7, 1), "PC_Safety_and_Security_Final_Audit_Report_IG1008A": datetime.datetime(2010, 4, 1), "PC_Guatemala_Final_Audit_Report_IG0904A": datetime.datetime(2009, 1, 1), "PC_Guinea_Final_Audit_Report_IG0909A": datetime.datetime(2009, 3, 1), "PC_Morocco_Final_Audit Report_IG0910A": datetime.datetime(2009, 7, 1), "PC_Nicaragua_Audit_Report_IG0912A": datetime.datetime(2009, 7, 1), "PC_Purchase_Card_Final_Audit_Report_IG0908A": datetime.datetime(2009, 3, 1), "PC_Samoa_Final_Audit_Report_IG0906A": datetime.datetime(2009, 3, 1), "PC_Senegal_FollowUp_Audit_Report_IG0911FUA": datetime.datetime(2009, 7, 1), "PC_Swaziland_Final_Audit_Report_IG0901A": datetime.datetime(2008, 11, 1), "PC_Uganda_FollowUp_Audit_Report_IG0907FUA": datetime.datetime(2009, 3, 1), "PC_Armenia_FollowUp_Audit_Report_IG-08-01": datetime.datetime(2007, 10, 1), "PC_Azerbaijan_Final_Audit_Report_IG-08-09-A": datetime.datetime(2008, 3, 1), "PC_Botswana_Final_Audit_Report_IG-08-16-A": datetime.datetime(2008, 9, 1), "PC_China_FollowUP_Audit_Report_IG-08-06": datetime.datetime(2008, 3, 1), "PC_EC_Final_Audit_Report_IG-08-03-A": datetime.datetime(2007, 12, 1), "PC_ElSalvador_Final_Audit_Report_IG-08-14-A": datetime.datetime(2008, 9, 1), "PC_Fiji_FInal_Audit_Report_IG08-04-A": datetime.datetime(2008, 1, 1), "Zambia_Audit_IG-07-16-A": datetime.datetime(2007, 9, 1), "PC_Georgia_Final_Audit_Report_IG-08-05-A": datetime.datetime(2008, 1, 1), "Memo_Internal_Controls_Online_Collaboration_Tools_IG-07-0000": datetime.datetime(2007, 9, 28), "PC_Kazakhstan_Final_Audit_Report_IG-08-02-A": datetime.datetime(2007, 10, 1), "PC_Peru_Final_Audit_Report_IG-08-07-A": datetime.datetime(2008, 3, 1), "PC_Philippines_Final_Audit_Report_IG-08-10-A": datetime.datetime(2008, 7, 1), "PC_South_Africa_FollowUp_Audit_Report_7-15-08": datetime.datetime(2008, 7, 1), "PC_Vanuatu_FollowUp_Audit_Report_IG-08-15": 
datetime.datetime(2008, 9, 1), "PC_Cameroon_Final_Audit_Report_IG-07-15-A": datetime.datetime(2007, 8, 1), "Kyrgyz_Republic_Audit_IG-07-05-A": datetime.datetime(2007, 3, 1), "PC_China_Final_Audit_Report_IG-07-07-A": datetime.datetime(2007, 3, 1), "PC_Honduras_FollowUp_Audit_Report": datetime.datetime(2007, 6, 1), "PC_Jordan_Final_Audit_Report_IG-07-17-A": datetime.datetime(2007, 9, 1), "PC_Niger_Final_Audit_Report_IG-07-13-A": datetime.datetime(2007, 7, 1), "PC_Panama_FollowUp_Audit_Report": datetime.datetime(2007, 6, 1), "PC_Senegal_Final_Audit_Report_IG-07-18-A": datetime.datetime(2007, 9, 1), "PC_SSN_FollowUp_Report": datetime.datetime(2007, 6, 1), "PC_Thailand_Final_Audit_Report_IG-07-19-A": datetime.datetime(2007, 9, 1), "PC_Uganda_FollowUp_Audit_Report_0703": datetime.datetime(2006, 12, 1), "PC_Zambia_FollowUp_Audit_Report_0704": datetime.datetime(2006, 1, 1), "PC_Zambia_Final_Audit_Report_IG-07-16-A": datetime.datetime(2007, 9, 1), "PC-Benin-Final-Eval_Sep-3-2015": datetime.datetime(2015, 9, 4), "PCIG_SARC_20150930": datetime.datetime(2015, 11, 30), "Guatemala_Final_Evaluation_Report": datetime.datetime(2015, 5, 13), "Lesotho_Final_Evaluation_Report": datetime.datetime(2015, 3, 31), "PCIG_SARC_20150331": datetime.datetime(2015, 5, 6), "Nepal_Final_Report": datetime.datetime(2015, 12, 1), "PCIG_Sierra_Leone_Final_Evaluation": datetime.datetime(2015, 1, 30), "PCIG_Armenia_Final_Evalation_Report": datetime.datetime(2014, 8, 19), "PCIG_Ecuador_Final_Evaluation_Report": datetime.datetime(2014, 5, 21), "PCIG_Mexico_Final_Evaluation_Report": datetime.datetime(2014, 6, 13), "PCIG_Final_Program_Evaluation_of_Peace_Corps_SARRR_Training": datetime.datetime(2013, 11, 21), "PCIG_Final_Program_Evaluation_Volunteer_Sexual_Assault_Policy": datetime.datetime(2013, 11, 21), "PCIG_Philippines_Final_Evaluation_Report": datetime.datetime(2014, 9, 16), "PCIG_Final_Evaluation_Report_Training_Peace_Corps_Overseas_Staff": datetime.datetime(2014, 9, 30), 
"PCIG_Colombia_Evaluation_Report": datetime.datetime(2013, 4, 29), "PCIG_Malawi_Evaluation_Report": datetime.datetime(2013, 3, 22), "PCIG_Moldova_Final_Evaluation_Report": datetime.datetime(2013, 9, 16), "PCIG_Namibia_Final_Evaluation_Report": datetime.datetime(2013, 3, 15), "PC_China_Final_Evaluation_Report_IG1204E": datetime.datetime(2012, 5, 24), "PC_Fiji_Final_Evaluation_Report_IG1201E": datetime.datetime(2011, 11, 30), "Kyrgyz_Republic_Final_Evaluation__December_6_2011": datetime.datetime(2011, 12, 6), "Final_Report_Review_of_the_Peace_Corps_Implementation_of_Guidelines_Related_to_Volunteer_Victims_of_Rape_and_Sexual_Assault": datetime.datetime(2012, 9, 27), "PC_Final_Evaluation_Report_on_Impacts_of_the_Five_Year_Rule_IG1205E": datetime.datetime(2012, 6, 20), "PC_Indonesia_Final_Evaluations_IG1207E": datetime.datetime(2012, 9, 19), "PC_Kyrgyz_Republic_Final_Evaluation_ Report_IG1202E": datetime.datetime(2011, 12, 6), "PC_Peru_Final_Evaluation_Report_IG1203E": datetime.datetime(2011, 3, 26), "PC_Uganda_Final_Evaluation_IG1206E": datetime.datetime(2012, 7, 6), "PC_Cambodia_Final_Evaluation_Report_IG1104E": datetime.datetime(2011, 5, 5), "PC_Ethiopia_Final_Program_Evaluation_Report_IG1102E": datetime.datetime(2011, 1, 14), "PC_Jamaica_Final_Evaluation_Report_IG1103E": datetime.datetime(2011, 2, 28), "PC_Liberia_Final_Evaluation_Report_IG1107E": datetime.datetime(2011, 9, 8), "PC_Romania_Final_Eval_Report_IG1105E": datetime.datetime(2011, 6, 30), "PC_Swaziland_Final_Evaluation_Report_IG1106E": datetime.datetime(2011, 8, 19), "PC_VDS_Follow-up_Final_Program_Evaluation_IG1101E": datetime.datetime(2010, 12, 1), "PC_Morocco_Final_Program_Evaluation_Report_IG1006E": datetime.datetime(2010, 2, 1), "PC_Suriname_Final_Program_Evaluation_Report_IG1009E": datetime.datetime(2010, 7, 1), "PC_Togo_Final_Program_Evaluation_IG1010E": datetime.datetime(2010, 9, 1), "PC_Turkmenistan_Final_Program_Evaluation_Report_IG1002E": datetime.datetime(2009, 11, 1), 
"PC_Belize_Final_Program_Evaluation_Report_IG0914E": datetime.datetime(2009, 8, 1), "PC_Dominican_Republic_Program_Evaluation_Report_IG0903E": datetime.datetime(2008, 12, 1), "PC_Ghana_Final_Program_Evaluation_Report_IG0913E": datetime.datetime(2009, 7, 1), "PC_Guyana_Final_Program_Evaluation_Report_IG0905E": datetime.datetime(2009, 2, 1), "PC_Jordan_Final_Program_Evaluation_Report_IG0915E": datetime.datetime(2009, 9, 1), "PC_Nicaragua_Program_Evaluation_Report_IG0902E": datetime.datetime(2008, 12, 1), "PC_Albania_Final_Evaluation_Report_IG-08-12-E": datetime.datetime(2008, 8, 1), "PC_Medical_Clearance_System_Report_IG-08-08-E": datetime.datetime(2008, 3, 1), "PCIG_Safety_and_Security_Final_Evaluation_Report_2008": datetime.datetime(2008, 8, 1), "PC_Azerbaijan_Final_Evaluation_Report_IG-07-11-E": datetime.datetime(2007, 7, 1), "PC_Cameroon_Final_Evaluation_Report_IG-07-01-E": datetime.datetime(2006, 10, 1), "PC_EC_Final_Evaluation_Report_IG-07-12-E": datetime.datetime(2007, 7, 1), "PC_Ecuador_Final_Evaludation_Report_IG-0704AE": datetime.datetime(2007, 1, 1), "PC_Guinea_Final_Evaluation_Report_IG-07-14-E": datetime.datetime(2007, 8, 1), "PC_ProgramStudyReport11": datetime.datetime(2007, 1, 1), "PC_South_Africa_Final_Evaluation-Report-IG-0702EA": datetime.datetime(2006, 10, 1), "Uganda_Followup_Audit_IG-07-03-FUA": datetime.datetime(2006, 12, 1), "Management_Advisory_Report-FOIA": datetime.datetime(2016, 3, 10), "Final_Report_Follow_Up_Evaluation_of_Issues_in_2010_PC_Morocco_Assessment_of_Medical_Care": datetime.datetime(2016, 3, 1), "PCIG_Buller_Peace_Corps_IG_Statement_02_03": datetime.datetime(2015, 2, 3), "PCIG_Buller_Peace_Corps_IG_Statement_9_10": datetime.datetime(2014, 9, 10), "PCIG_Kathy_A_Buller_Testimony_PC_OIG_Jan-15-2014_Strengthen_OIG_Oversight": datetime.datetime(2014, 1, 15), "buller_testimony_sfac_10_06_11": datetime.datetime(2011, 10, 6), "IGAccess.JGlennLtr.072315": datetime.datetime(2015, 7, 23), "CIGIE_Letter_to_HSGAC_HOGR_8-3-15": 
datetime.datetime(2015, 8, 3), "PCIG_FY_2016_OIG_Annual_Plan": datetime.datetime(2015, 9, 1), "PCIG_FY_2016-18_OIG_Strategic_Plan": datetime.datetime(2015, 8, 1), "Strategic_Plan_FY_17-19_for_web": datetime.datetime(2016, 8, 1), "Peace_Corps_Rwanda_-_Final_Evaluation_Report_IG-16-02-E": datetime.datetime(2016, 8, 11), "Senegal_Final_Audit_Report_IG-16-04-A_xFYq3ir": datetime.datetime(2016, 7, 26), "PGIC_SARC_20160331": datetime.datetime(2016, 5, 9), "Indonesia_Final_Audit_Report_IG-16-03-A_Cz1wEbX": datetime.datetime(2016, 7, 17), "Safety_and_security_weaknesses_in_PCCameroon_NEW": datetime.datetime(2016, 7, 31), "MAR_Site_History_Files_2016": datetime.datetime(2016, 8, 24), "Kathy_A._Buller_Inspector_General_Testimony": datetime.datetime(2011, 5, 11), "2015_FISMA_highlights_M26WqOS": datetime.datetime(2015, 11, 1), "MAR_Conference_Cost_Reporting_IG-16-03-SR": datetime.datetime(2016, 9, 20), "Final_Audit_of_Peace_Corps_Colombia_IG-16-05-A_7OVJYcv": datetime.datetime(2016, 9, 23), "Recurring_Issues_Report": datetime.datetime(2016, 9, 23), "OIG_Annual_Plan_FY17": datetime.datetime(2016, 9, 30), "2016_Peace_Corps_Management_and_Performance_Challenges": datetime.datetime(2016, 11, 30), "FISMA_final_report": datetime.datetime(2016, 11, 10), "406_highlights": datetime.datetime(2016, 8, 1), "Final_Evaluation_Report_on_the_Peace_Corps_Sexual_Assault_Risk_Reduction_and_Response_Program": datetime.datetime(2016, 11, 28), "OIG_SARC_April_2016_-_September_2016": datetime.datetime(2016, 9, 1), "Final_Audit_of_Peace_Corps_China_IG-17-01-A": datetime.datetime(2017, 1, 23), "PC_IG_Kathy_A._Buller_Written_Statement_Feb_1_2017": datetime.datetime(2017, 2, 1), "Audit_of_Peace_Corps_Georgia_IG-17-02-A": datetime.datetime(2017, 3, 28), "Peace_Corps_Kosovo_Final_Evaluation_Report_IG-17-02-E": datetime.datetime(2017, 5, 12), "OIG_Semiannual_Report_to_Congress_Oct_2016_-_March_2017": datetime.datetime(2017, 3, 1), "Final_Report_on_the_Audit_of_Peace_Corps_Eastern_Caribbean_IG-17-03-A": 
datetime.datetime(2017, 6, 8), "Final_Report_on_the_Evaluation_of_PC_South_Africa_IG-17-03-E": datetime.datetime(2017, 6, 19), "Interim_Update": datetime.datetime(2017, 6, 28), "Management_Implication_Report_-_Challenges_Associated_with_Staff_Turnover": datetime.datetime(2017, 7, 31), "July_2017_Memo_Summarizing_Results_of_Review_of_Unredacted_Sexual_Assault_Case_Documentation_8.10.17_hyperlink": datetime.datetime(2017, 8, 10), "Final_Report_on_the_Audit_of_Peace_Corps_Cambodia": datetime.datetime(2017, 9, 28), "Strategic_Plan_FY_18-20_web": datetime.datetime(2017, 9, 29), "Annual_Plan_2018": datetime.datetime(2017, 10, 5), "Final_Report_on_the_Follow-up_Audit_of_Peace_Corps_Zambia_X05a1s9": datetime.datetime(2017, 9, 29), "2017_FISMA_Final_Report": datetime.datetime(2017, 10, 31), "PC_OIG_2017_DATA_Act_Audit_Report": datetime.datetime(2017, 11, 3), "Management_and_performance_challenges_WEB_page_numbers": datetime.datetime(2017, 11, 7), "Buller_Peace_Corps_OIG_Testimony_Nov_15_2017": datetime.datetime(2017, 11, 15), "Final_Report_on_the_Evaluation_of_PC_Costa_Rica": datetime.datetime(2017, 11, 22), "Final_System_Review_Report_-_Peace_Corps_OIG_Peer_Review_11.28.17": datetime.datetime(2017, 11, 28), "Peace_Corps_OIG_Semiannual_Report_to_Congress_APR-SEP_2017": datetime.datetime(2017, 11, 8), "Final_Report_on_the_Evaluation_of_Peace_Corps_Albania_IG-18-02-E": datetime.datetime(2017, 12, 12), "Summary_of_Internal_Control_Issues_Over_the_Peace_Corps_Financial_Reporting_FY_2017": datetime.datetime(2018, 1, 4), "Final_Report_on_the_Follow_Up_Review_of_Peace_Corps_Peru": datetime.datetime(2018, 1, 25), "Final_Report_on_Follow_Up_Review_of_Peace_Corps_Uganda": datetime.datetime(2018, 4, 9), "Final_Report_on_the_Audit_of_Peace_Corps_Panama_IG-18-01-A": datetime.datetime(2018, 5, 2), "SARC_OCT17-MAR18_for_web": datetime.datetime(2018, 6, 1), } REPORT_TYPE_MAP = { 'Plans & Reports': 'other', 'Special Review': 'other', 'Audit': 'audit', 'None': 'other', 'Evaluation': 
    'evaluation',
    'Annual Report': 'semiannual_report',
    'Letter': 'other',
    'Testimony': 'testimony',
    'Management Advisory': 'other',
    'Investigation': 'investigation',
    'Policy & Procedure': 'other',
}

# Several consecutive reports appear twice on pages 4 and 5 at time of writing.
# Each entry counts how many times the report_id has been seen so that only the
# first occurrence is saved (see report_from below).
doubled_reports = {
    "PCIG_Final_Program_Evaluation_of_Peace_Corps_SARRR_Training": 0,
    "PCIG_South_Africa_Final_Audit_Report": 0,
    "PCIG_Moldova_Final_Evaluation_Report": 0,
    "PC_Final_Audit_Report_Jordan_IG1207": 0,
    "PC_Ethiopia_Final_Program_Evaluation_Report_IG1102E": 0,
    "PC_Ethiopia_Final_Audit_Report_IG1102A": 0,
    "PC_Fiji_Final_Evaluation_Report_IG1201E": 0,
}


def run(options):
    """Scrape every listing page of Peace Corps OIG reports and save each report.

    Walks paginated results starting at page 1, extracting one report per
    ".teaser" element, until no "next page" link is present.
    Raises inspector.NoReportsFoundError if a page unexpectedly has no results.
    """
    year_range = inspector.year_range(options, archive)

    # Pull the reports
    page = 1
    while True:
        doc = utils.beautifulsoup_from_url(REPORTS_URL % page)
        results = doc.select(".teaser")
        if not results:
            # An empty page means the scraper's selectors are stale or the site changed.
            raise inspector.NoReportsFoundError("Peace Corps")
        for result in results:
            report = report_from(result, year_range)
            if report:
                inspector.save_report(report)
        # Keep going while the pager shows a "next" link.
        if doc.select(".pager__link--next"):
            page += 1
        else:
            break


def report_from(result, year_range):
    """Build a report dict from one ".teaser" search-result element.

    :param result: BeautifulSoup element for a single report teaser
    :param year_range: iterable of years the caller wants reports for
    :return: report dict suitable for inspector.save_report, or None when the
             report is skipped (no date found, duplicate, or out of range)
    :raises Exception: when the teaser's tags contain no recognized report type
    """
    link = result.find("a")
    # The report id is the URL's file name without its extension.
    report_url = urllib.parse.unquote(link.get('href'))
    report_filename = report_url.split("/")[-1]
    report_id, _ = os.path.splitext(report_filename)
    report_id = urllib.parse.unquote(report_id)
    title = link.text

    report_type = None
    tag_text = None
    if "Semiannual Report to Congress" in title:
        report_type = "semiannual_report"
    else:
        # Use the first tag that maps to a known type; tag_text keeps the last
        # tag examined so the error below can report what was seen.
        for tag in result.select(".ul--tags li"):
            tag_text = tag.text.strip()
            if tag_text in REPORT_TYPE_MAP:
                report_type = REPORT_TYPE_MAP[tag_text]
                break
    if not report_type:
        raise Exception("Unrecognized report type %s" % tag_text)

    # Date resolution: hardcoded mapping first, then try parsing the title's
    # trailing "Month day, Year" segment, else log and skip.
    published_on = None
    if report_id in REPORT_PUBLISHED_MAPPING:
        published_on = REPORT_PUBLISHED_MAPPING[report_id]
    if not published_on:
        try:
            published_on_text = title.split("-")[-1].strip()
            # Normalize the one abbreviated month the site uses.
            published_on_text = published_on_text.replace("Sept.", "September")
            published_on = datetime.datetime.strptime(published_on_text,
                                                      '%B %d, %Y')
        except ValueError:
            pass
    if not published_on:
        admin.log_no_date("peacecorps", report_id, title, report_url)
        return

    # Deduplicate reports that appear on more than one listing page.
    if report_id in doubled_reports:
        if doubled_reports[report_id] == 0:
            doubled_reports[report_id] += 1
        else:
            return

    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return

    report = {
        'inspector': 'peacecorps',
        'inspector_url': 'https://www.peacecorps.gov/about/inspectors-general/',
        'agency': 'peacecorps',
        'agency_name': 'Peace Corps',
        'type': report_type,
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
    return report


utils.run(run) if
<filename>venv/Lib/site-packages/tobiiresearch/implementation/EyeTracker.py<gh_stars>0
# NOTE(review): the <filename>/<gh_stars> markers above are extraction artifacts, not Python.
from tobiiresearch.interop import interop
from tobiiresearch.implementation.Errors import _on_error_raise_exception
from tobiiresearch.implementation.EyeImageData import EyeImageData
from tobiiresearch.implementation.ExternalSignalData import ExternalSignalData
from tobiiresearch.implementation.GazeData import GazeData
from tobiiresearch.implementation.UserPositionGuide import UserPositionGuide
from tobiiresearch.implementation.HMDGazeData import HMDGazeData
from tobiiresearch.implementation._LogEntry import _LogEntry
from tobiiresearch.implementation.Notifications import CalibrationModeEnteredData, CalibrationModeLeftData
from tobiiresearch.implementation.Notifications import CalibrationChangedData
from tobiiresearch.implementation.Notifications import ConnectionLostData, ConnectionRestoredData
from tobiiresearch.implementation.Notifications import DisplayAreaChangedData, GazeOutputFrequencyChangedData
from tobiiresearch.implementation.Notifications import TrackBoxChangedData, EyeTrackingModeChangedData
from tobiiresearch.implementation.Notifications import DeviceFaultsData, DeviceWarningsData
from tobiiresearch.implementation.StreamErrorData import StreamErrorData
from tobiiresearch.implementation.TimeSynchronizationData import TimeSynchronizationData
import threading

# Internal subscription key for the notification stream.
_EYETRACKER_NOTIFICATIONS = "_eyetracker_notifications"
# Numeric status codes mirrored from the native layer.
_invalid_parameter = 10  # __TobiiProStatus.invalid_parameter
_invalid_operation = 11  # __TobiiProStatus.invalid_operation

##
# Indicates that the device can have display areas set.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_CAN_SET_DISPLAY_AREA = "capability_can_set_display_area"

##
# Indicates that the device can deliver an external signal stream.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_HAS_EXTERNAL_SIGNAL = "capability_has_external_signal"

##
# Indicates that the device can deliver an eye image stream.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_HAS_EYE_IMAGES = "capability_has_eye_images"

##
# Indicates that the device can deliver a gaze data stream.
# Standard for all screen based eye trackers.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_HAS_GAZE_DATA = "capability_has_gaze_data"

##
# Indicates that the device can deliver a HMD gaze data stream.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_HAS_HMD_GAZE_DATA = "capability_has_hmd_gaze_data"

##
# Indicates that screen based calibration can be performed on the device.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_CAN_DO_SCREEN_BASED_CALIBRATION = "capability_can_do_screen_based_calibration"

##
# Indicates that HMD based calibration can be performed on the device.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_CAN_DO_HMD_BASED_CALIBRATION = "capability_can_do_hmd_based_calibration"

##
# Indicates that monocular calibration can be performed on the device.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_CAN_DO_MONOCULAR_CALIBRATION = "capability_can_do_monocular_calibration"

##
# Indicates that it's possible to get and set the HMD lens configuration on the device.
#
# Value in tuple EyeTracker.device_capabilities
# <CodeExample>create_eyetracker.py</CodeExample>
CAPABILITY_HAS_HMD_LENS_CONFIG = "capability_has_hmd_lens_config"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for external signal.
#
# You will get a callback when the value of the external signal port (TTL input) on the eye tracker device changes. Not
# all eye trackers have an output trigger port. The output feature could be used to synchronize the eye tracker data
# with data from other devices. The output data contains a time reference that matches the time reference on the time
# synchronized gaze data. Callbacks will receive an ExternalSignalData object or a dictionary with values if
# as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>external_signal.py</CodeExample>
EYETRACKER_EXTERNAL_SIGNAL = "eyetracker_external_signal"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for eye images.
#
# You will get a callback when a new eye image is received, and the occurrence depends on the eye tracker model. Not all
# eye tracker models support this feature. If no one is listening to gaze data, the eye tracker will only deliver full
# images, otherwise either cropped or full images will be delivered depending on whether or not the eye tracker has
# detected eyes. Callbacks will receive an EyeImageData object or a dictionary with values if as_dictionary is True.
# <CodeExample>eye_images.py</CodeExample>
EYETRACKER_EYE_IMAGES = "eyetracker_eye_images"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for gaze data.
#
# You will get a callback when time synchronized gaze is received. Time synchronized gaze is not supported on all eye
# trackers, other eye trackers need additional license to activate this support. Callbacks will receive a GazeData
# object or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>gaze_data.py</CodeExample>
EYETRACKER_GAZE_DATA = "eyetracker_gaze_data"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for the user position guide.
#
# Callbacks will receive a UserPositionGuide object or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>user_position_guide.py</CodeExample>
EYETRACKER_USER_POSITION_GUIDE = "eyetracker_user_position_guide"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for HMD gaze data.
#
# You will get a callback when time synchronized HMD gaze is received.
# Time synchronized HMD gaze is not supported on all eye trackers.
# Callbacks will receive a HMDGazeData object or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>gaze_data.py</CodeExample>
EYETRACKER_HMD_GAZE_DATA = "eyetracker_hmd_gaze_data"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for calibration mode entered messages.
#
# You will get a callback when calibration mode is entered. Callbacks will receive a CalibrationModeEnteredData object
# or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_CALIBRATION_MODE_ENTERED = "eyetracker_notification_calibration_mode_entered"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for calibration mode left messages.
#
# You will get a callback when calibration mode is left. Callbacks will receive a CalibrationModeLeftData object
# or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_CALIBRATION_MODE_LEFT = "eyetracker_notification_calibration_mode_left"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for calibration changed messages.
#
# You will get a callback when the calibration is changed. Callbacks will receive a CalibrationChangedData object
# or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_CALIBRATION_CHANGED = "eyetracker_notification_calibration_changed"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for connection lost messages.
#
# You will get a callback when connection to the eye tracker is lost. Callbacks will receive a ConnectionLostData
# object or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_CONNECTION_LOST = "eyetracker_notification_connection_lost"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for connection restored messages.
#
# You will get a callback when connection to the eye tracker is restored. Callbacks will receive a
# ConnectionRestoredData object or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_CONNECTION_RESTORED = "eyetracker_notification_connection_restored"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for display area changed messages.
#
# You will get a callback when the display area is changed. Callbacks will receive a DisplayAreaChangedData object
# or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_DISPLAY_AREA_CHANGED = "eyetracker_notification_display_area_changed"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for gaze output frequency changed messages.
#
# You will get a callback when the gaze output frequency is changed. Callbacks will receive a
# GazeOutputFrequencyChangedData object or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_GAZE_OUTPUT_FREQUENCY_CHANGED = "eyetracker_notification_gaze_output_frequency_changed"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for track box changed messages.
#
# You will get a callback when the track box is changed. Callbacks will receive a TrackBoxChangedData object or a
# dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_TRACK_BOX_CHANGED = "eyetracker_notification_track_box_changed"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for eyetracking mode changed messages.
#
# You will get a callback when the eyetracking mode is changed. Callbacks will receive a EyeTrackingModeChangedData
# object or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_EYE_TRACKING_MODE_CHANGED = "eyetracker_notification_eye_tracking_mode_changed"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for device faults messages.
#
# You will get a callback when new faults are received. Callbacks will receive a DeviceFaultsData
# object or a dictionary with values if as_dictionary is True.
# The faults information consists of a comma separated string.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_DEVICE_FAULTS = "eyetracker_notification_device_faults"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for device warnings messages.
#
# You will get a callback when new warnings are received. Callbacks will receive a DeviceWarningsData
# object or a dictionary with values if as_dictionary is True.
# The warnings information consists of a comma separated string.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>notifications.py</CodeExample>
EYETRACKER_NOTIFICATION_DEVICE_WARNINGS = "eyetracker_notification_device_warnings"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for stream errors.
#
# You will get a callback when an error occurs on other streams. You can get errors when subscribing, when something
# happened to the connection in the stream pump or when an error was raised in a callback. Callbacks will receive a
# StreamErrorData object or a dictionary with values if as_dictionary is True.
# <CodeExample>stream_errors.py</CodeExample>
EYETRACKER_STREAM_ERRORS = "eyetracker_stream_errors"

##
# Used in EyeTracker.subscribe_to and EyeTracker.unsubscribe_from for time synchronization data.
#
# You will get a callback when the computer and the eye trackers clocks gets synchronized. To handle normal drifts
# between clocks the clocks are checked on regular basis, and this results in that the time stamps are adjusted for
def print_prop_of_var_to_txt(values, system_name, directory):
    """
    Print the proportion of variance explained by each principal component
    to a tab-separated text file and echo the head of the table to stdout.

    :param values: array or list, singular values in descending order
    :param system_name: name of the system, used for the output file name
    :param directory: output directory for the text file
    :return: None
    """
    normalized_values = values / np.sum(values)
    df = pd.DataFrame({'Principal Component': pd.Series([i + 1 for i in range(len(values))]),
                       'Singular Value': values,
                       'Prop. of Variance': normalized_values,
                       'Cumul. Prop. of Var.': np.cumsum(normalized_values)})
    # Show the whole frame on one line when echoing to stdout.
    pd.set_option('display.expand_frame_repr', False)
    print(df.head())
    df.to_csv(os.path.join(directory, system_name + '_prop_of_var.txt'), sep='\t', index=None)


def print_distance_weights_to_files(directory, n_dim, system_name, pca_components, num_atoms,
                                    selected_atom_indexes=None):
    """
    Write one file per principal component listing the coefficient of every
    interatomic distance, sorted by absolute magnitude (descending).

    :param directory: output directory, str
    :param n_dim: number of principal components to write, int
    :param system_name: system name used in output file names, str
    :param pca_components: PCA components (n_components x n_distances), array-like
    :param num_atoms: number of atoms in the system, int
    :param selected_atom_indexes: optional mapping whose values carry the indexes
        into the full distance vector (used when PCA ran on a distance subset);
        None means all distances were used
    :return: None
    """
    for n in range(n_dim):
        if selected_atom_indexes:
            # Column 1 of the mapping's values holds the flat distance-vector index.
            distance_vector_indexes = list(pd.DataFrame(list(selected_atom_indexes.values()))[1])
        else:
            distance_vector_indexes = range(len(pca_components[n]))
        rows = []
        for k, l in zip(distance_vector_indexes, range(len(pca_components[n]))):
            i, j = calc_ij(k, num_atoms)  # map flat distance index -> atom pair
            rows.append({'atom 1': i, 'atom 2': j, 'Coefficient of Distance': pca_components[n][l]})
        d_df = pd.DataFrame(rows)
        sorted_d = d_df.reindex(d_df['Coefficient of Distance'].abs().sort_values(ascending=False).index)
        output_path = os.path.join(directory, system_name + '_PC%s_components.txt' % (n + 1))
        sorted_d.to_csv(output_path, sep='\t', index=None)


def print_distance_weights_to_files_select_atom_indexes(atom_indexes, n_dim, pca_components,
                                                        system_name, directory):
    """
    Like ``print_distance_weights_to_files`` but the atom pair for each
    coefficient is looked up directly in ``atom_indexes``.

    :param atom_indexes: indexable where entry k yields the (atom 1, atom 2) pair
    :param n_dim: number of principal components to write, int
    :param pca_components: PCA components, array-like
    :param system_name: system name used in output file names, str
    :param directory: output directory, str
    :return: None
    """
    for n in range(n_dim):
        rows = [{'atom 1': atom_indexes[k][0],
                 'atom 2': atom_indexes[k][1],
                 'Coefficient of Distance': pca_components[n][k]}
                for k in range(len(pca_components[n]))]
        d_df = pd.DataFrame(rows)
        sorted_d = d_df.reindex(d_df['Coefficient of Distance'].abs().sort_values(ascending=False).index)
        sorted_d.to_csv(os.path.join(directory, system_name + '_PC%s_components.txt' % (n + 1)),
                        sep='\t', index=None)


def print_distance_weights_to_files_weighted(directory, n_dim, system_name, pca_components,
                                             pca_values, num_atoms, display=False):
    """
    Write per-PC distance coefficients weighted by the fraction of the total
    singular-value mass the PC carries (coefficient * value_n / sum(values)).

    :param directory: output directory, str
    :param n_dim: number of principal components to write, int
    :param system_name: system name used in output file names, str
    :param pca_components: PCA components, array-like
    :param pca_values: PCA singular values, one per component
    :param num_atoms: number of atoms in the system, int
    :param display: if True, also print each sorted table to stdout
    :return: None
    """
    for n in range(n_dim):
        weight = pca_values[n] / sum(pca_values)  # hoisted: invariant per component
        rows = []
        for k in range(len(pca_components[n])):
            i, j = calc_ij(k, num_atoms)
            rows.append({'atom 1': i, 'atom 2': j,
                         'Coefficient of Distance': weight * pca_components[n][k]})
        d_df = pd.DataFrame(rows)
        sorted_d = d_df.reindex(d_df['Coefficient of Distance'].abs().sort_values(ascending=False).index)
        sorted_d.to_csv(os.path.join(directory, system_name + '_PC%s_components_weighted.txt' % (n + 1)),
                        sep='\t', index=None)
        if display:
            print("PC%s" % (n + 1))
            print(sorted_d)


def transform_new_data(new_xyz_file_path, output_directory, n_dim, pca_fit, pca_components,
                       pca_mean, original_traj_coords, input_type, stereo_atoms=[1, 2, 3, 4],
                       mw=False, remove_atom_types=None, selected_atom_indexes=None):
    """
    Dispatch a new trajectory into the reduced dimensional space built from
    either Cartesian coordinates or interatomic distances.

    :param new_xyz_file_path: new input trajectory (xyz file location), str
    :param output_directory: output directory, str
    :param n_dim: number of dimensions of the reduced space, int
    :param pca_fit: fit from PCA on training data
    :param pca_components: components from PCA on training data, array
    :param pca_mean: mean of the PCA input data, array
    :param original_traj_coords: coordinates the reduced space was trained on
    :param input_type: "Cartesians" or "Distances"
    :param stereo_atoms: indexes of 4 atoms around the stereogenic center, list of ints
    :param mw: whether coordinates are mass weighted prior to PCA, bool
    :param remove_atom_types: atom types to strip before transforming, or None
    :param selected_atom_indexes: if given (Distances only), restrict to these distances
    :raises ValueError: if input_type is neither "Cartesians" nor "Distances".
        (Previously this branch only printed an error and then crashed with a
        NameError on the return statement.)
    :return: (new_system_name, components_df)
    """
    if input_type == "Cartesians":
        new_system_name, components_df = transform_new_data_cartesians(
            new_xyz_file_path, output_directory, n_dim, pca_fit, pca_components, pca_mean,
            original_traj_coords, mw=mw, remove_atom_types=remove_atom_types)
    elif input_type == "Distances":
        if selected_atom_indexes:
            new_system_name, components_df = transform_new_data_only_top_distances(
                new_xyz_file_path, output_directory, n_dim, pca_fit, pca_components, pca_mean,
                selected_atom_indexes=selected_atom_indexes, stereo_atoms=stereo_atoms, mw=mw,
                remove_atom_types=remove_atom_types)
        else:
            new_system_name, components_df = transform_new_data_distances(
                new_xyz_file_path, output_directory, n_dim, pca_fit, pca_components, pca_mean,
                stereo_atoms=stereo_atoms, mw=mw, remove_atom_types=remove_atom_types)
    else:
        raise ValueError("ERROR: Please specify input_type=\"Cartesians\" or \"Distances\"")
    return new_system_name, components_df
def transform_new_data_cartesians(new_trajectory_file_path, output_directory, n_dim, pca_fit,
                                  pca_components, pca_mean, original_traj_coords, mw=False,
                                  remove_atom_types=None, topology=None):
    """
    Transform a new trajectory (xyz file) into a reduced dimensional space
    that was previously trained on Cartesian coordinates, and write xyz files
    for the trajectories reconstructed from the principal components.

    :param new_trajectory_file_path: new input trajectory (xyz file location), str
    :param output_directory: output directory, str
    :param n_dim: number of dimensions of the reduced space, int
    :param pca_fit: fit from PCA on training data
    :param pca_components: components from PCA on training data, array
    :param pca_mean: mean of the PCA input data (mean structure as coords), array
    :param original_traj_coords: coordinates the reduced space was trained on
    :param mw: whether coordinates are mass weighted prior to PCA, bool
    :param remove_atom_types: atom types to strip before transforming, or None
    :param topology: unused; retained for interface compatibility
    :return: (new_system_name, components_df)
    """
    print("\nTransforming %s into reduced dimensional representation..." % new_trajectory_file_path)
    new_system_name, atoms, coordinates = read_traj_file(new_trajectory_file_path)
    if remove_atom_types is not None:
        atoms, coordinates = remove_atoms_by_type(remove_atom_types, atoms, coordinates)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    print("\nResults for %s input will be stored in %s" % (new_trajectory_file_path, output_directory))
    # Suffix used for naming output directories/files.
    file_name_end = "_Cartesians"
    # Align structures (Kabsch) so rigid rotations don't leak into the PCs.
    aligned_original_traj_coords = kabsch(original_traj_coords)
    coords_for_analysis = align_to_original_traj(coordinates, aligned_original_traj_coords)
    if mw is True:
        file_name_end = file_name_end + "_MW"
        coords_for_analysis = mass_weighting(atoms, coords_for_analysis)
    else:
        file_name_end = file_name_end + "_noMW"
    # Flatten n x N x 3 coordinates to n x 3N for the PCA transform.
    coords_for_analysis = np.reshape(coords_for_analysis,
                                     (coords_for_analysis.shape[0],
                                      coords_for_analysis.shape[1] * coords_for_analysis.shape[2]))
    components = pca_fit.transform(coords_for_analysis)
    components_df = pd.DataFrame(components)
    # Reconstruct structures along each PC separately, and along all PCs combined.
    PCs_separate = []
    for i in range(0, n_dim):
        PCi = np.dot(components[:, i, None], pca_components[None, i, :]) + pca_mean
        PCs_separate.append(PCi)
    PCs_combined = np.dot(components, pca_components) + pca_mean
    PCs_separate = np.array(PCs_separate)
    PCs_combined = np.array(PCs_combined)
    # Reshape n x 3N arrays back into n x N x 3 coordinate arrays.
    PCs_separate = np.reshape(PCs_separate, (PCs_separate.shape[0], PCs_separate.shape[1],
                                             int(PCs_separate.shape[2] / 3), 3))
    PCs_combined = np.reshape(PCs_combined, (1, PCs_combined.shape[0],
                                             int(PCs_combined.shape[1] / 3), 3))
    if mw is True:
        # Undo the mass-weighting before writing xyz files.
        no_mass_weighting_PCs_separate = [remove_mass_weighting(atoms, PCs_separate[i])
                                          for i in range(n_dim)]
        no_mass_weighting_PCs_combined = remove_mass_weighting(atoms, PCs_combined)
    else:
        no_mass_weighting_PCs_separate = PCs_separate
        no_mass_weighting_PCs_combined = PCs_combined
    aligned_PCs_separate = no_mass_weighting_PCs_separate
    aligned_PCs_combined = no_mass_weighting_PCs_combined
    make_pc_xyz_files(output_directory, new_system_name + file_name_end, atoms, aligned_PCs_separate)
    make_pc_xyz_files(output_directory, new_system_name + file_name_end, atoms, aligned_PCs_combined)
    return new_system_name, components_df


def transform_new_data_distances(new_trajectory_file_path, output_directory, n_dim, pca_fit,
                                 pca_components, pca_mean, stereo_atoms=[1, 2, 3, 4], mw=False,
                                 remove_atom_types=None):
    """
    Transform a new trajectory (xyz file) into a reduced dimensional space
    that was previously trained on interatomic distances, and write xyz files
    for the trajectories reconstructed from the principal components.

    :param new_trajectory_file_path: new input trajectory (xyz file location), str
    :param output_directory: output directory, str
    :param n_dim: number of dimensions of the reduced space, int
    :param pca_fit: fit from PCA on training data
    :param pca_components: components from PCA on training data, array
    :param pca_mean: mean of the PCA input data (mean structure as distances), array
    :param stereo_atoms: indexes of 4 atoms around the stereogenic center, list of ints
    :param mw: whether coordinates are mass weighted prior to PCA, bool
    :param remove_atom_types: atom types to strip before transforming, or None
    :return: (new_system_name, components_df)
    """
    print("\nTransforming %s into reduced dimensional representation..." % new_trajectory_file_path)
    new_system_name, atoms, coordinates = read_traj_file(new_trajectory_file_path)
    if remove_atom_types is not None:
        atoms, coordinates = remove_atoms_by_type(remove_atom_types, atoms, coordinates)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    print("\nResults for %s input will be stored in %s" % (new_trajectory_file_path, output_directory))
    # Suffix used for naming output directories/files.
    file_name_end = "_Distances"
    if mw is True:
        file_name_end = file_name_end + "_MW"
        coordinates_shifted = set_atom_one_to_origin(coordinates)
        coords_for_analysis = mass_weighting(atoms, coordinates_shifted)
    else:
        file_name_end = file_name_end + "_noMW"
        coords_for_analysis = coordinates
    # Record chirality signs so reconstructed structures keep a consistent hand.
    negatives, positives, zeroes, all_signs = chirality_test(coordinates, stereo_atoms)
    d2 = generate_distance_matrices(coords_for_analysis)
    coords_for_analysis = reshape_ds(d2)
    components = pca_fit.transform(coords_for_analysis)
    components_df = pd.DataFrame(components)
    # Reconstruct distance matrices along each PC separately, and combined.
    PCs_separate = []
    for i in range(0, n_dim):
        PCi = np.dot(components[:, i, None], pca_components[None, i, :]) + pca_mean
        PCs_separate.append(PCi)
    PCs_combined = np.dot(components, pca_components) + pca_mean
    PCs_separate = np.array(PCs_separate)
    PCs_combined = np.array(PCs_combined)
    # Turn distance-matrix representations back into Cartesian coordinates.
    PCs_separate = [[distance_matrix_to_coords(PCs_separate[i][k])
                     for k in range(PCs_separate.shape[1])]
                    for i in range(PCs_separate.shape[0])]
    PCs_combined = [distance_matrix_to_coords(PCs_combined[i])
                    for i in range(np.array(PCs_combined).shape[0])]
    # Embedding can yield tiny imaginary parts; keep the real component only.
    PCs_separate = np.real(PCs_separate)
    PCs_combined = np.real(PCs_combined)
    if mw is True:
        # Undo the mass-weighting before writing xyz files.
        no_mass_weighting_PCs_separate = [remove_mass_weighting(atoms, PCs_separate[i])
                                          for i in range(n_dim)]
        no_mass_weighting_PCs_combined = remove_mass_weighting(atoms, PCs_combined)
    else:
        no_mass_weighting_PCs_separate = PCs_separate
        no_mass_weighting_PCs_combined = PCs_combined
    # Fix chirality flips, then align (Kabsch) for a consistent orientation.
    aligned_PCs_separate = [kabsch(chirality_changes(no_mass_weighting_PCs_separate[i],
                                                     stereo_atoms, all_signs))
                            for i in range(n_dim)]
    aligned_PCs_combined = kabsch(chirality_changes(no_mass_weighting_PCs_combined,
                                                    stereo_atoms, all_signs))
    aligned_PCs_combined = np.reshape(aligned_PCs_combined, (1,
                                                             aligned_PCs_combined.shape[0],
                                                             aligned_PCs_combined.shape[1],
                                                             aligned_PCs_combined.shape[2]))
    make_pc_xyz_files(output_directory, new_system_name + file_name_end, atoms, aligned_PCs_separate)
    make_pc_xyz_files(output_directory, new_system_name + file_name_end, atoms, aligned_PCs_combined)
    return new_system_name, components_df
from node.ext.ldap import LDAPNode from node.ext.ldap import SUBTREE from node.ext.ldap import testing from node.ext.ldap.ugm import Group from node.ext.ldap.ugm import Groups from node.ext.ldap.ugm import RolesConfig from node.ext.ldap.ugm import Ugm from node.ext.ldap.ugm import User from node.ext.ldap.ugm import Users from node.ext.ldap.ugm._api import ACCOUNT_EXPIRED from node.ext.ldap.ugm._api import PrincipalAliasedAttributes from node.tests import NodeTestCase from odict import odict import ldap layer = testing.LDIF_posixGroups def create_ugm(): props = layer['props'] ucfg = layer['ucfg'] gcfg = layer['gcfg'] rcfg = None # XXX: later return Ugm(name='ugm', parent=None, props=props, ucfg=ucfg, gcfg=gcfg, rcfg=rcfg) def posix_groups_ugm(fn): def wrapper(self): fn(self, create_ugm()) return wrapper class TestUGMPosixGroups(NodeTestCase): layer = layer @posix_groups_ugm def test_basics(self, ugm): # Users object self.assertTrue(isinstance(ugm.users, Users)) self.assertTrue(ugm['users'] is ugm.users) # Groups object self.assertTrue(isinstance(ugm.groups, Groups)) self.assertTrue(ugm['groups'] is ugm.groups) # Try to delete from UGM, fails err = self.expect_error( NotImplementedError, ugm.__delitem__, 'users' ) self.assertEqual(str(err), 'Operation forbidden on this node.') # Try to set item by invalid key, fails err = self.expect_error( KeyError, ugm.__setitem__, 'inexistent', ugm.users ) self.assertEqual(str(err), "'inexistent'") @posix_groups_ugm def test_fetch_users(self, ugm): # User keys users = ugm.users self.assertEqual(users.keys(), [u'uid0', u'uid1', u'uid2']) # Fetch some users user_0 = users['uid0'] user_1 = users['uid1'] user_2 = users['uid2'] self.assertEqual(users.values(), [user_0, user_1, user_2]) self.assertTrue(isinstance(user_0, User)) self.assertTrue(isinstance(user_0.attrs, PrincipalAliasedAttributes)) self.assertEqual(user_0.attrs['cn'], 'cn0') self.assertEqual(user_0.attrs['sn'], 'sn0') self.assertEqual(user_0.attrs['login'], 'cn0') # XXX: 
LDAPNodeAttributes.items does not return consistent results if # attrmap points to same attribute multiple times self.assertEqual(sorted(user_0.attrs.items()), [ ('gidNumber', u'0'), ('homeDirectory', u'/home/uid0'), ('login', u'cn0'), ('rdn', u'uid0'), ('sn', u'sn0'), ('uidNumber', u'0') ]) # User is a leaf err = self.expect_error( NotImplementedError, user_0.__setitem__, 'foo', object() ) self.assertEqual(str(err), 'User does not support ``__setitem__``') err = self.expect_error( NotImplementedError, user_0.__delitem__, 'foo' ) self.assertEqual(str(err), 'User does not support ``__delitem__``') err = self.expect_error( NotImplementedError, user_0.__getitem__, 'foo' ) self.assertEqual(str(err), 'User does not support ``__getitem__``') self.assertEqual(user_0.keys(), []) @posix_groups_ugm def test_authenticate(self, ugm): # Authenticate self.assertEqual(ugm.users.authenticate('uid0', 'secret0'), 'uid0') self.assertEqual(ugm.users.authenticate('cn0', 'secret0'), 'uid0') self.assertEqual(ugm.users.authenticate('uid0', 'invalid'), False) self.assertEqual(ugm.users.authenticate('cn0', 'invalid'), False) self.assertEqual(ugm.users.authenticate('foo', 'secret0'), False) @posix_groups_ugm def test_account_expiration(self, ugm): users = ugm.users # Note: after changind expires attribute, user must be pesisted in # order to take expiration effect for authentication. 
Expires attribute # lookup is done against LDAP directly in ``users.authenticate`` # Expires attribute not set yet self.assertEqual(users.expiresAttr, None) self.assertFalse(users['uid0'].expired) # Set expires attribute for ongoing tests users.expiresAttr = 'shadowExpire' # Value 99999 and -1 means no expiration self.assertEqual(users['uid0'].context.attrs['shadowExpire'], u'99999') self.assertEqual(users['uid0'].context.attrs['shadowInactive'], u'0') self.assertEqual(users.authenticate('uid0', 'secret0'), u'uid0') self.assertFalse(users['uid0'].expired) # Expire a while ago users['uid0'].context.attrs['shadowExpire'] = '1' users['uid0']() res = users.authenticate('uid0', 'secret0') self.assertEqual(res, ACCOUNT_EXPIRED) self.assertFalse(bool(res)) self.assertTrue(users['uid0'].expired) # No expiration far future users['uid0'].context.attrs['shadowExpire'] = '99999' users['uid0']() self.assertEqual(users.authenticate('uid0', 'secret0'), u'uid0') self.assertFalse(users['uid0'].expired) # No expiration by '-1' users['uid0'].context.attrs['shadowExpire'] = '-1' users['uid0']() self.assertEqual(users.authenticate('uid0', 'secret0'), u'uid0') self.assertFalse(users['uid0'].expired) # Invalid expiration field data users.expiresAttr = 'uid' self.assertFalse(users.authenticate('uid0', 'secret0')) # XXX: figure out shadowInactive -> PAM and samba seem to ignore -> configuration? 
# users['uid0'].context.attrs['shadowInactive'] = u'99999' # Uid0 never expires - or at leas expires in many years and even if, there are # 99999 more days unless account gets disabled # self.assertEqual(users.authenticate('uid0', 'secret0'), u'uid0') # users['uid0'].context.attrs['shadowInactive'] = '0' @posix_groups_ugm def test_change_password(self, ugm): err = self.expect_error( ldap.UNWILLING_TO_PERFORM, ugm.users.passwd, 'uid0', 'foo', 'bar' ) self.assertEqual(err.args[0], { 'info': 'unwilling to verify old password', 'desc': 'Server is unwilling to perform' }) self.expect_error( KeyError, ugm.users.passwd, 'foo', 'secret0', 'bar' ) ugm.users.passwd('uid0', 'secret0', 'bar') self.assertEqual(ugm.users.authenticate('uid0', 'bar'), 'uid0') ugm.users.expiresAttr = 'shadowExpire' ugm.users.passwd('uid0', 'bar', 'secret0') self.assertEqual(ugm.users.authenticate('uid0', 'secret0'), 'uid0') @posix_groups_ugm def test_add_user(self, ugm): users = ugm.users self.check_output(""" <class 'node.ext.ldap.ugm._api.Users'>: users <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.User'>: uid2 """, users.treerepr()) user = users.create( 'sepp', cn='Sepp', sn='Unterwurzacher', uidNumber='99', gidNumber='99', homeDirectory='home/sepp' ) self.assertTrue(isinstance(user, User)) # The user is added to tree self.check_output(""" <class 'node.ext.ldap.ugm._api.Users'>: users <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.User'>: uid2 <class 'node.ext.ldap.ugm._api.User'>: sepp """, users.treerepr()) # Though, no authentication or password setting possible yet, because # tree is not persisted to LDAP yet self.assertFalse(users.authenticate('sepp', 'secret')) self.expect_error( KeyError, ugm.users.passwd, 'sepp', None, 'secret' ) # After calling, new user is available in LDAP ugm() ugm.users.passwd('sepp', None, 'secret') 
self.assertEqual(users.authenticate('sepp', 'secret'), 'sepp') # Delete already created user del users['sepp'] ugm() @posix_groups_ugm def test_fetch_groups(self, ugm): groups = ugm.groups self.assertEqual(groups.keys(), [u'group0', u'group1', u'group2']) group_0 = groups['group0'] group_1 = groups['group1'] group_2 = groups['group2'] self.assertEqual(groups.values(), [group_0, group_1, group_2]) self.assertTrue(isinstance(group_0, Group)) self.assertTrue(isinstance(group_0.attrs, PrincipalAliasedAttributes)) self.assertEqual(sorted(group_0.attrs.items()), [ ('gidNumber', '0'), ('memberUid', ['nobody', 'uid0']), ('rdn', 'group0') ]) self.assertEqual(sorted(group_1.attrs.items()), [ ('gidNumber', u'1'), ('memberUid', [u'nobody', u'uid0', u'uid1']), ('rdn', u'group1') ]) @posix_groups_ugm def test_add_group(self, ugm): groups = ugm.groups group = groups.create('group99', id='group99') self.assertTrue(isinstance(group, Group)) ugm() self.check_output(""" <class 'node.ext.ldap.ugm._api.Groups'>: groups <class 'node.ext.ldap.ugm._api.Group'>: group0 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.Group'>: group1 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.Group'>: group2 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.User'>: uid2 <class 'node.ext.ldap.ugm._api.Group'>: group99 """, groups.treerepr()) # Delete already created group del groups['group99'] ugm() self.check_output(""" <class 'node.ext.ldap.ugm._api.Groups'>: groups <class 'node.ext.ldap.ugm._api.Group'>: group0 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.Group'>: group1 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.Group'>: group2 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 
'node.ext.ldap.ugm._api.User'>: uid2 """, groups.treerepr()) @posix_groups_ugm def test_group_memebership(self, ugm): users = ugm.users groups = ugm.groups # A group returns the members ids as keys group_0 = groups['group0'] group_1 = groups['group1'] group_2 = groups['group2'] self.assertEqual(group_0.member_ids, ['uid0']) self.assertEqual(group_1.member_ids, ['uid0', 'uid1']) self.assertEqual(group_2.member_ids, ['uid0', 'uid1', 'uid2']) # The member users are fetched via ``__getitem__`` user_1 = ugm.users['uid1'] self.assertTrue(user_1 is group_1['uid1']) # Querying a group for a non-member results in a KeyError self.expect_error(KeyError, group_0.__getitem__, 'uid1') # Deleting inexistend member from group fails self.expect_error(KeyError, group_0.__delitem__, 'inexistent') # ``__setitem__`` is prohibited err = self.expect_error( NotImplementedError, group_1.__setitem__, 'uid0', users['uid0'] ) self.assertEqual(str(err), 'Group does not support ``__setitem__``') # Members are added via ``add`` group_0.add('uid1') self.assertEqual(group_0.keys(), [u'uid0', u'uid1']) self.assertEqual(group_0.member_ids, [u'uid0', u'uid1']) self.assertTrue(group_0['uid0'] is ugm.users['uid0']) self.assertEqual(group_0.users, [users['uid0'], users['uid1']]) group_0() # Let's take a fresh view on ldap whether this really happened ugm_fresh = create_ugm() self.assertEqual(ugm_fresh.groups['group0'].keys(), [u'uid0', u'uid1']) # Members are removed via ``delitem`` del group_0['uid1'] ugm_fresh = create_ugm() self.assertEqual(ugm_fresh.groups['group0'].keys(), [u'uid0']) user_0 = ugm_fresh.users['uid0'] user_1 = ugm_fresh.users['uid1'] user_2 = ugm_fresh.users['uid2'] # A user knows its groups self.assertEqual(user_0.groups, [ ugm_fresh.groups['group0'], ugm_fresh.groups['group1'], ugm_fresh.groups['group2'] ]) self.assertEqual(user_1.groups, [ ugm_fresh.groups['group1'], ugm_fresh.groups['group2'] ]) self.assertEqual(user_2.groups, [ ugm_fresh.groups['group2'] ]) 
self.assertEqual(user_0.group_ids, ['group0', 'group1', 'group2']) self.assertEqual(user_1.group_ids, ['group1', 'group2']) self.assertEqual(user_2.group_ids, ['group2']) @posix_groups_ugm def test_search(self, ugm): users = ugm.users groups = ugm.groups # Test search function self.assertEqual(users.search(criteria={'login': 'cn0'}), [u'uid0']) self.assertEqual(groups.search(criteria={'id': 'group2'}), [u'group2']) @posix_groups_ugm def test_ids(self, ugm): users = ugm.users groups = ugm.groups # There's an ids property on principals base class self.assertEqual(users.ids, [u'uid0', u'uid1', u'uid2']) self.assertEqual(groups.ids, [u'group0', u'group1', u'group2']) @posix_groups_ugm def test_membership_assignment(self, ugm): users = ugm.users groups = ugm.groups # Add user to some groups and then delete user, check whether user # is removed from all this groups. user = users.create( 'sepp', cn='Sepp', sn='Unterwurzacher', uidNumber='99', gidNumber='99', homeDirectory='home/sepp' ) groups['group0'].add('sepp') groups['group1'].add('sepp') ugm() self.assertEqual(user.groups, [groups['group0'], groups['group1']]) self.assertEqual(user.group_ids, [u'group0', u'group1']) self.check_output(""" <class 'node.ext.ldap.ugm._api.Ugm'>: ugm <class 'node.ext.ldap.ugm._api.Users'>: users <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.User'>: uid2 <class 'node.ext.ldap.ugm._api.User'>: sepp <class 'node.ext.ldap.ugm._api.Groups'>: groups <class 'node.ext.ldap.ugm._api.Group'>: group0 <class 'node.ext.ldap.ugm._api.User'>: sepp <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.Group'>: group1 <class 'node.ext.ldap.ugm._api.User'>: sepp <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.Group'>: group2 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 
'node.ext.ldap.ugm._api.User'>: uid2 """, ugm.treerepr()) del users['sepp'] ugm() self.check_output(""" <class 'node.ext.ldap.ugm._api.Ugm'>: ugm <class 'node.ext.ldap.ugm._api.Users'>: users <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.User'>: uid2 <class 'node.ext.ldap.ugm._api.Groups'>: groups <class 'node.ext.ldap.ugm._api.Group'>: group0 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.Group'>: group1 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.Group'>: group2 <class 'node.ext.ldap.ugm._api.User'>: uid0 <class 'node.ext.ldap.ugm._api.User'>: uid1 <class 'node.ext.ldap.ugm._api.User'>: uid2 """, ugm.treerepr()) @posix_groups_ugm def test_no_member_uid_attribute_yet(self, ugm): # Test case where group object does not have 'memberUid' attribute # set yet. node = LDAPNode( u'cn=group0,ou=groups,ou=posixGroups,dc=my-domain,dc=com', props=self.layer['props'] ) del node.attrs['memberUid'] node() group = ugm.groups['group0'] self.assertEqual(group.items(), []) group.add('uid0') group() node = LDAPNode( u'cn=group0,ou=groups,ou=posixGroups,dc=my-domain,dc=com', props=self.layer['props'] ) self.assertEqual(node.attrs['memberUid'], ['uid0']) @posix_groups_ugm def test_inexistent_member_reference(self, ugm): # Test case where group contains reference to inexistent member. node = LDAPNode( u'cn=group0,ou=groups,ou=posixGroups,dc=my-domain,dc=com', props=self.layer['props'] ) node.attrs['memberUid'] = ['uid0', 'inexistent'] node() group = ugm.groups['group0'] self.assertEqual(group.keys(), ['uid0']) node.attrs['memberUid'] = ['uid0'] node() def test_roles(self): # Role Management. Create container for roles. 
props = layer['props'] node = LDAPNode('dc=my-domain,dc=com', props) node['ou=roles'] = LDAPNode() node['ou=roles'].attrs['objectClass'] = ['organizationalUnit'] node() ucfg = layer['ucfg'] gcfg = layer['gcfg'] rcfg = RolesConfig( baseDN='ou=roles,dc=my-domain,dc=com', attrmap=odict(( ('rdn', 'cn'), ('id', 'cn') )), scope=SUBTREE, queryFilter='(objectClass=posixGroup)', objectClasses=['posixGroup'], defaults={}, strict=False ) ugm = Ugm(props=props, ucfg=ucfg, gcfg=gcfg, rcfg=rcfg) user = ugm.users['uid1'] self.assertEqual(ugm.roles(user), []) ugm.add_role('viewer', user) self.assertEqual(ugm.roles(user), ['viewer']) self.assertEqual(user.roles, ['viewer']) user =
without geo referencing """ import time, os from anuga.file.netcdf import NetCDFFile # Setup #from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular # Create basic mesh (20m x 3m) width = 3 length = 20 t_end = 3 points, vertices, boundary = rectangular(length, width, length, width) # Create shallow water domain domain = Domain(points, vertices, boundary) domain.default_order = 2 domain.set_minimum_storable_height(0.01) domain.set_name('flowtest') swwfile = domain.get_name() + '.sww' domain.set_datadir('.') domain.format = 'sww' domain.smooth = True h = 1.0 u = 2.0 uh = u*h Br = Reflective_boundary(domain) # Side walls Bd = Dirichlet_boundary([h, uh, 0]) # 2 m/s across the 3 m inlet: domain.set_quantity('elevation', 0.0) domain.set_quantity('stage', h) domain.set_quantity('xmomentum', uh) domain.set_boundary( {'left': Bd, 'right': Bd, 'top': Br, 'bottom': Br}) for t in domain.evolve(yieldstep=1, finaltime = t_end): pass # Check that momentum is as it should be in the interior I = [[0, width/2.], [length/2., width/2.], [length, width/2.]] f = file_function(swwfile, quantities=['stage', 'xmomentum', 'ymomentum'], interpolation_points=I, verbose=False) for t in range(t_end+1): for i in range(3): assert num.allclose(f(t, i), [1, 2, 0], atol=1.0e-6) # Check flows through the middle for i in range(5): x = length/2. + i*0.23674563 # Arbitrary cross_section = [[x, 0], [x, width]] time, Q = get_flow_through_cross_section(swwfile, cross_section, verbose=False) assert num.allclose(Q, uh*width) # Try the same with partial lines x = length/2. for i in range(5): start_point = [length/2., i*width/5.] 
            #print start_point
            cross_section = [start_point, [length/2., width]]

            # NOTE(review): 'time, Q = ...' rebinds the name 'time',
            # shadowing the 'time' module imported by the enclosing test —
            # confirm the module is not needed after this point.
            time, Q = get_flow_through_cross_section(swwfile,
                                                     cross_section,
                                                     verbose=False)
            #print i, Q, (width-start_point[1])
            assert num.allclose(Q, uh*(width-start_point[1]))

        # Verify no flow when line is parallel to flow
        cross_section = [[length/2.-10, width/2.], [length/2.+10, width/2.]]
        time, Q = get_flow_through_cross_section(swwfile,
                                                 cross_section,
                                                 verbose=False)
        #print i, Q
        assert num.allclose(Q, 0, atol=1.0e-5)

        # Try with lines on an angle (all flow still runs through here)
        cross_section = [[length/2., 0], [length/2.+width, width]]
        time, Q = get_flow_through_cross_section(swwfile,
                                                 cross_section,
                                                 verbose=False)
        assert num.allclose(Q, uh*width)

    def test_get_flow_through_cross_section_stored_uniquely(self):
        """test_get_flow_through_cross_section_stored_uniquely(self):

        Test that the total flow through a cross section can be
        correctly obtained from an sww file.

        This test creates a flat bed with a known flow through it and tests
        that the function correctly returns the expected flow.

        The specifics are
            u = 2 m/s
            h = 1 m
            w = 3 m (width of channel)

            q = u*h*w = 6 m^3/s
        """

        import time, os
        from anuga.file.netcdf import NetCDFFile

        # Setup
        #from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular

        # Create basic mesh (20m x 3m)
        width = 3
        length = 20
        t_end = 3
        points, vertices, boundary = rectangular(length, width, length, width)

        # Create shallow water domain
        domain = Domain(points, vertices, boundary)
        domain.default_order = 2
        domain.set_minimum_storable_height(0.01)

        domain.set_name('flowtest_uniquely')
        swwfile = domain.get_name() + '.sww'

        # set_store_vertices_uniquely() is the distinguishing setup of this
        # test variant (see the method name) — otherwise identical to the
        # plain flow-through-cross-section test.
        domain.set_store_vertices_uniquely()

        domain.set_datadir('.')
        domain.format = 'sww'
        domain.smooth = True

        h = 1.0
        u = 2.0
        uh = u*h

        Br = Reflective_boundary(domain)     # Side walls
        Bd = Dirichlet_boundary([h, uh, 0])  # 2 m/s across the 3 m inlet:

        domain.set_quantity('elevation', 0.0)
        domain.set_quantity('stage', h)
        domain.set_quantity('xmomentum', uh)
        domain.set_boundary( {'left': Bd, 'right': Bd, 'top': Br, 'bottom': Br})

        for t in domain.evolve(yieldstep=1, finaltime = t_end):
            pass

        # Check that momentum is as it should be in the interior
        I = [[0, width/2.],
             [length/2., width/2.],
             [length, width/2.]]

        f = file_function(swwfile,
                          quantities=['stage', 'xmomentum', 'ymomentum'],
                          interpolation_points=I,
                          verbose=False)

        for t in range(t_end+1):
            for i in range(3):
                assert num.allclose(f(t, i), [1, 2, 0], atol=1.0e-6)

        # Check flows through the middle
        for i in range(5):
            x = length/2. + i*0.23674563    # Arbitrary
            cross_section = [[x, 0], [x, width]]

            time, Q = get_flow_through_cross_section(swwfile,
                                                     cross_section,
                                                     verbose=False)

            assert num.allclose(Q, uh*width)

        # Try the same with partial lines
        # NOTE(review): 'x' below appears unused by the partial-line loop —
        # verify before removing.
        x = length/2.
        for i in range(5):
            start_point = [length/2., i*width/5.]
            #print start_point
            cross_section = [start_point, [length/2., width]]

            time, Q = get_flow_through_cross_section(swwfile,
                                                     cross_section,
                                                     verbose=False)
            #print i, Q, (width-start_point[1])
            assert num.allclose(Q, uh*(width-start_point[1]))

        # Verify no flow when line is parallel to flow
        cross_section = [[length/2.-10, width/2.], [length/2.+10, width/2.]]
        time, Q = get_flow_through_cross_section(swwfile,
                                                 cross_section,
                                                 verbose=False)
        #print i, Q
        assert num.allclose(Q, 0, atol=1.0e-5)

        # Try with lines on an angle (all flow still runs through here)
        cross_section = [[length/2., 0], [length/2.+width, width]]
        time, Q = get_flow_through_cross_section(swwfile,
                                                 cross_section,
                                                 verbose=False)
        assert num.allclose(Q, uh*width)

    def test_get_flow_through_cross_section_with_geo(self):
        """test_get_flow_through_cross_section(self):

        Test that the total flow through a cross section can be
        correctly obtained from an sww file.

        This test creates a flat bed with a known flow through it and tests
        that the function correctly returns the expected flow.
        The specifics are
            u = 2 m/s
            h = 2 m
            w = 3 m (width of channel)

            q = u*h*w = 12 m^3/s

        This run tries it with georeferencing and with elevation = -1
        """

        import time, os
        from anuga.file.netcdf import NetCDFFile

        # Setup
        #from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular

        # Create basic mesh (20m x 3m)
        width = 3
        length = 20
        t_end = 1
        points, vertices, boundary = rectangular(length, width, length, width)

        # Create shallow water domain
        domain = Domain(points, vertices, boundary,
                        geo_reference = Geo_reference(56,308500,6189000))

        domain.default_order = 2
        domain.set_minimum_storable_height(0.01)

        domain.set_name('flowtest')
        swwfile = domain.get_name() + '.sww'

        domain.set_datadir('.')
        domain.format = 'sww'
        domain.smooth = True

        e = -1.0
        w = 1.0
        h = w-e
        u = 2.0
        uh = u*h

        Br = Reflective_boundary(domain)     # Side walls
        Bd = Dirichlet_boundary([w, uh, 0])  # 2 m/s across the 3 m inlet:

        domain.set_quantity('elevation', e)
        domain.set_quantity('stage', w)
        domain.set_quantity('xmomentum', uh)
        domain.set_boundary( {'left': Bd, 'right': Bd, 'top': Br, 'bottom': Br})

        for t in domain.evolve(yieldstep=1, finaltime = t_end):
            pass

        # Check that momentum is as it should be in the interior
        I = [[0, width/2.],
             [length/2., width/2.],
             [length, width/2.]]
        I = domain.geo_reference.get_absolute(I)

        f = file_function(swwfile,
                          quantities=['stage', 'xmomentum', 'ymomentum'],
                          interpolation_points=I,
                          verbose=False)

        for t in range(t_end+1):
            for i in range(3):
                #print i, t, f(t, i)
                assert num.allclose(f(t, i), [w, uh, 0], atol=1.0e-6)

        # Check flows through the middle
        for i in range(5):
            # NOTE(review): this statement was split across two physical
            # lines in the damaged source and has been rejoined here.
            x = length/2. + i*0.23674563    # Arbitrary
            cross_section = [[x, 0], [x, width]]
            # Cross section points must be expressed in absolute (geo
            # referenced) coordinates, matching the sww file.
            cross_section = domain.geo_reference.get_absolute(cross_section)

            time, Q = get_flow_through_cross_section(swwfile,
                                                     cross_section,
                                                     verbose=False)

            assert num.allclose(Q, uh*width)

    def test_get_energy_through_cross_section(self):
        """test_get_energy_through_cross_section(self):

        Test that the specific and total energy through a cross section can be
        correctly obtained from an sww file.

        This test creates a flat bed with a known flow through it and tests
        that the function correctly returns the expected energies.

        The specifics are
            u = 2 m/s
            h = 1 m
            w = 3 m (width of channel)

            q = u*h*w = 6 m^3/s
            Es = h + 0.5*v*v/g  # Specific energy head [m]
            Et = w + 0.5*v*v/g  # Total energy head [m]

        This test uses georeferencing
        """

        import time, os
        from anuga.file.netcdf import NetCDFFile

        # Setup
        #from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular

        # Create basic mesh (20m x 3m)
        width = 3
        length = 20
        t_end = 1
        points, vertices, boundary = rectangular(length, width, length, width)

        # Create shallow water domain
        domain = Domain(points, vertices, boundary,
                        geo_reference = Geo_reference(56,308500,6189000))

        domain.default_order = 2
        domain.set_minimum_storable_height(0.01)

        domain.set_name('flowtest')
        swwfile = domain.get_name() + '.sww'

        domain.set_datadir('.')
        domain.format = 'sww'
        domain.smooth = True

        e = -1.0
        w = 1.0
        h = w-e
        u = 2.0
        uh = u*h

        Br = Reflective_boundary(domain)     # Side walls
        Bd = Dirichlet_boundary([w, uh, 0])  # 2 m/s across the 3 m inlet:

        domain.set_quantity('elevation', e)
        domain.set_quantity('stage', w)
        domain.set_quantity('xmomentum', uh)
        domain.set_boundary( {'left': Bd, 'right': Bd, 'top': Br, 'bottom': Br})

        for t in domain.evolve(yieldstep=1, finaltime = t_end):
            pass

        # Check that momentum is as it should be in the interior
        I = [[0, width/2.],
             [length/2., width/2.],
             [length, width/2.]]
        I = domain.geo_reference.get_absolute(I)

        f = file_function(swwfile,
                          quantities=['stage', 'xmomentum', 'ymomentum'],
                          interpolation_points=I,
                          verbose=False)

        for t in range(t_end+1):
            for i in range(3):
                #print i, t, f(t, i)
                assert num.allclose(f(t, i), [w, uh, 0], atol=1.0e-6)

        # Check energies through the middle
        for i in range(5):
            x = length/2. + i*0.23674563    # Arbitrary
            cross_section = [[x, 0], [x, width]]
            cross_section = domain.geo_reference.get_absolute(cross_section)

            time, Es = get_energy_through_cross_section(swwfile,
                                                        cross_section,
                                                        kind='specific',
                                                        verbose=False)

            # 'g' is presumably the module-level gravitational constant
            # defined elsewhere in this file — TODO confirm.
            assert num.allclose(Es, h + 0.5*u*u/g)
Space Science "2041-8213": ["IOP Publishing", "American Astronomical Society"], # The Astrophysical Journal "0024-6107": ["Oxford University Press (OUP)", "Wiley-Blackwell"], # Journal of the London Mathematical Society "2169-9313": ["Wiley-Blackwell", "American Geophysical Union (AGU)"], # Journal of Geophysical Research: Solid Earth "2169-9356": ["Wiley-Blackwell", "American Geophysical Union (AGU)"], # Journal of Geophysical Research: Solid Earth (electronic) "0022-3166": ["American Society for Nutrition", "Oxford University Press (OUP)"],# Journal of Nutrition "1541-6100": ["American Society for Nutrition", "Oxford University Press (OUP)"], # Journal of Nutrition (electronic) "1651-2235": ["Co-Action Publishing", "Informa UK Limited"], # Microbial Ecology in Health & Disease "0891-060X": ["Co-Action Publishing", "Informa UK Limited"], # Microbial Ecology in Health & Disease (linking) "2575-1433": ["HAU, Journal of Ethnographic Theory", "University of Chicago Press"], # HAU: Journal of Ethnographic Theory "2222-1751": ["Springer Nature", "Informa UK Limited"], # Emerging Microbes & Infections "0013-0133": ["Wiley-Blackwell", "Oxford University Press (OUP)"], # The Economic Journal "1358-3883": ["Informa UK Limited", "Springer Nature"], # Tertiary Education and Management "1573-1936": ["Informa UK Limited", "Springer Nature"], # Tertiary Education and Management (electronic) "1559-8608": ["Informa UK Limited", "Springer Nature"], # Journal of Statistical Theory and Practice "1559-8616": ["Informa UK Limited", "Springer Nature"], # Journal of Statistical Theory and Practice (electronic) "2199-8531": ["Springer Nature", "MDPI AG"], # Journal of Open Innovation: Technology, Market, and Complexity "1939-4551": ["Springer Nature", "Elsevier BV"], # World Allergy Organization Journal "1015-8987": ["<NAME> AG", "Cell Physiol Biochem Press GmbH and Co KG"], # Cellular Physiology and Biochemistry "1421-9778": ["<NAME>arger AG", "Cell Physiol Biochem Press GmbH and Co 
KG"], # Cellular Physiology and Biochemistry (electronic) "2052-4986": ["Oxford University Press (OUP)", "Wiley-Blackwell"], # Transactions of the London Mathematical Society "2169-9097": ["Wiley-Blackwell", "American Geophysical Union (AGU)"], # Journal of Geophysical Research JGR / E - Planets "0048-6604": ["Wiley-Blackwell", "American Geophysical Union (AGU)"], # Radio Science "1747-0218": ["Informa UK Limited", "SAGE Publications"], # (The) Quarterly Journal of Experimental Psychology "1747-0226": ["Informa UK Limited", "SAGE Publications"], # (The) Quarterly Journal of Experimental Psychology (electronic) "1461-9571": ["Informa UK Limited", "Cambridge University Press (CUP)"], # European Journal of Archaeology "1741-2722": ["Informa UK Limited", "Cambridge University Press (CUP)"], # European Journal of Archaeology (electronic) "1179-1349": ["Dove Medical Press Ltd.", "Informa UK Limited"], # Clinical Epidemiology "1179-1322": ["Dove Medical Press Ltd.", "Informa UK Limited"], # Cancer Management and Research "1178-7090": ["Dove Med,ical Press Ltd.", "Informa UK Limited"], # Journal of Pain Research "1179-1608": ["Dove Medical Press Ltd.", "Informa UK Limited"], # Nature and Science of Sleep "1178-2021": ["Dove Medical Press Ltd.", "Informa UK Limited"], # Neuropsychiatric Disease and Treatment "2155-384X": ["Springer Nature", "Ovid Technologies (Wolters Kluwer Health)"], # Clinical and Translational Gastroenterology "0009-921X": ["Springer Science + Business Media", "Springer Nature", "Ovid Technologies (Wolters Kluwer Health)"], # Clinical Orthopaedics and Related Research® "1179-5549": ["Libertas Academica, Ltd.", "SAGE Publications"], # Clinical Medicine Insights: Oncology "0141-8955": ["Springer Science + Business Media", "Springer Nature", "Wiley-Blackwell"], # Journal of Inherited Metabolic Disease "0261-3875": ["Wiley-Blackwell", "Cambridge University Press (CUP)"], # Legal Studies "1748-121X": ["Wiley-Blackwell", "Cambridge University Press (CUP)"], # 
Legal Studies (electronic) "2045-824X": ["Springer Science + Business Media", "Publiverse Online S.R.L"], # Vascular Cell "1869-4179": ["Springer Nature", "Walter de Gruyter GmbH"], # Raumforschung und Raumordnung "0034-0111": ["Springer Nature", "Walter de Gruyter GmbH"], # Raumforschung und Raumordnung (linking) "1552-5260": ["Elsevier BV", "Wiley-Blackwell"], # Alzheimer's & Dementia "1807-5932": ["FapUNIFESP (SciELO)", "Fundacao Faculdade de Medicina"], # Clinics "1617-9625": ["Springer Science + Business Media", "Springer Nature", "E.U. European Publishing"], # Tobacco Induced Diseases, at EP since 2018 "1878-7649": ["Elsevier BV", "Springer Nature"], # European Geriatric Medicine "0032-5791": ["Oxford University Press (OUP)", "Elsevier BV"], # Poultry Science "2050-490X": ["Springer Science + Business Media", "EDP Sciences"], # Regenerative Medicine Research "1438-3896": ["Springer Nature", "Wiley-Blackwell"], # Population Ecology "1438-390X": ["Springer Nature", "Wiley-Blackwell"], # Population Ecology (electronic) "1440-1711": ["Nature Publishing Group", "Springer Nature", "Wiley-Blackwell"], # Immunology and Cell Biology "1616-5047": ["Elsevier BV", "Springer Nature"], # Mammalian Biology "0892-6638": ["FASEB", "Wiley-Blackwell"], # The FASEB Journal "1530-6860": ["FASEB", "Wiley-Blackwell"], # The FASEB Journal (electronic) "0935-1221": ["Schweizerbart", "Copernicus GmbH"], # European Journal of Mineralogy (linking) "2049-6958": ["Springer Nature", "PAGEPress Publications"], # Multidisciplinary Respiratory Medicine "1828-695X": ["Springer Nature", "PAGEPress Publications"], # Multidisciplinary Respiratory Medicine (linking) "0924-9338": ["Elsevier BV", "Royal College of Psychiatrists"], # European Psychiatry "1778-3585": ["Elsevier BV", "Royal College of Psychiatrists"], # European Psychiatry (electronic) "0090-5992": ["Informa UK Limited", "Cambridge University Press (CUP)"], # Nationalities Papers "1465-3923": ["Informa UK Limited", "Cambridge 
University Press (CUP)"], # Nationalities Papers (electronic) "1460-244X": ["Oxford University Press (OUP)", "Wiley-Blackwell"], # Proceedings of the London Mathematical Society "0860-021X": ["Index Copernicus", "Termedia Sp. z.o.o."], # Biology of Sport "1056-6171": ["Oxford University Press (OUP)", "Elsevier BV"], # Journal of Applied Poultry Research "1341-9145": ["Japanese Association of Industrial Health", "Wiley-Blackwell"], # Journal of Occupational Health "1348-9585": ["Japanese Association of Industrial Health", "Wiley-Blackwell"], # Journal of Occupational Health (electronic) "2059-7029": ["BMJ", "Elsevier BV"], # ESMO Open "2001-1326": ["Springer Science + Business Media", "Springer Nature", "Wiley-Blackwell"], # Clinical and Translational Medicine "2050-0068": ["Springer Nature", "Wiley-Blackwell"], # Clinical & Translational Immunology "1757-448X": ["IOS Press", "IMR Press"], # Journal of Integrative Neuroscience "0219-6352": ["IOS Press", "IMR Press"], # Journal of Integrative Neuroscience (linking) "1539-1663": ["Soil Science Society of America", "Wiley-Blackwell"], # Vadose Zone Journal "2352-8737": ["Elsevier BV", "Wiley-Blackwell"], # Alzheimer's & Dementia : Translational Research and Clinical Interventions "2414-6641": ["Universitaet Innsbruck - Innsbruck University Press", "University of Bern"], # Current Issues in Sport Science (CISS) "0003-021X": ["Springer Nature", "Wiley-Blackwell"], # Journal of the American Oil Chemists' Society "1558-9331": ["Springer Nature", "Wiley-Blackwell"], # Journal of the American Oil Chemists' Society (electronic) "1995-8692": ["Bern Open Publishing", "University of Bern"], # Journal of Eye Movement Research "2051-1426": ["Springer Nature", "BMJ"], # Journal for ImmunoTherapy of Cancer "1474-905X": ["Royal Society of Chemistry (RSC)", "Springer Nature"], # Photochemical & Photobiological Sciences "1474-9092": ["Royal Society of Chemistry (RSC)", "Springer Nature"], # Photochemical & Photobiological Sciences 
(electronic) "2245-0157": ["Aarhus University Library", "Det Kgl. Bibliotek/Royal Danish Library"], # Nordic Journal of Working Life Studies "0884-2914": ["Cambridge University Press (CUP)", "Springer Nature"], # Journal of Materials Research (JMR) "2044-5326": ["Cambridge University Press (CUP)", "Springer Nature"], # Journal of Materials Research (JMR) (electronic) "0884-1616": ["Cambridge University Press (CUP)", "Springer Nature"], # Journal of Materials Research (JMR) (linking) "0024-4201": ["Springer Nature", "Wiley-Blackwell"], # Lipids "1558-9307": ["Springer Nature", "Wiley-Blackwell"] # Lipids (electronic) } # A whiltelist for denoting changes in journal full open access policy. ISSNs # listed here will not be checked for equal "is_hybrid" status by the name_consistency # test. Note that we make not further attempts in determining the correct hybrid # status for any journal listed here (like trying to track a point of time were the # policy change occured), it is up to the contributing institutions to deliver # correct data in these cases. JOURNAL_HYBRID_STATUS_CHANGED = [ "2041-1723", # Nature Communications "1474-9718", # Aging Cell "1555-8932", # Genes & Nutrition "1756-1833", # BMJ (fully OA status disputed, "added value" content not OA) "1461-1457", # International Journal of Neuropsychopharmacology "1552-5783", # Investigative Opthalmology & Visual Science, OA since 01/2016 "0001-4966", # The Journal of the Acoustical Society of America, archives hybrid and non-hybrid sub-journals "0887-0446", # Psychology & Health, status unclear -> Possible mistake in Konstanz U data "0066-4804", # Antimicrobial Agents and Chemotherapy -> delayed OA journal. 
Borderline case, needs further discussion "0022-1430", # Journal of Glaciology, Gold OA since 2016 "1467-7644", # Plant Biotechnology Journal, Gold OA since 2016 "2046-2069", # RSC Advances, Gold OA since 01/2017 "2041-6520", # Chemical Science, Gold OA since 2015 "0260-3055", # Annals of Glaciology, Gold OA since 2016 "1744-5647", # Journal of Maps, Gold OA since 09/2016 "1445-5781", # Reproductive Medicine and Biology, Gold OA since 2017 "2522-0144", # Research in the Mathematical Sciences, Hybrid since 2018 "1574-7891", # Molecular Oncology, Gold OA since 2/2017 "1749-5016", # Social Cognitive and Affective Neuroscience, Gold OA since 2017 "0161-0457", # Scanning, Gold OA since 2017 "2300-3235", # Bulletin of the Veterinary Institute in Puławy, Gold OA since 2016 "1461-1457", # International Journal
power = {'BUSES': {'Area': 1.33155, 'Bus/Area': 1.33155, 'Bus/Gate Leakage': 0.00662954, 'Bus/Peak Dynamic': 0.0, 'Bus/Runtime Dynamic': 0.0, 'Bus/Subthreshold Leakage': 0.0691322, 'Bus/Subthreshold Leakage with power gating': 0.0259246, 'Gate Leakage': 0.00662954, 'Peak Dynamic': 0.0, 'Runtime Dynamic': 0.0, 'Subthreshold Leakage': 0.0691322, 'Subthreshold Leakage with power gating': 0.0259246}, 'Core': [{'Area': 32.6082, 'Execution Unit/Area': 8.2042, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.134375, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.308232, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.791398, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.122718, 'Execution Unit/Instruction Scheduler/Area': 2.17927, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.347161, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996, 'Execution 
Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.601157, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.34478, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.2931, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.221822, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 6.68559, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.149512, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0125848, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution 
Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.138639, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0930726, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.288151, 'Execution Unit/Register Files/Runtime Dynamic': 0.105657, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.371823, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.911432, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155, 'Execution Unit/Runtime Dynamic': 3.0238, 'Execution Unit/Subthreshold Leakage': 1.83518, 'Execution Unit/Subthreshold Leakage with power gating': 0.709678, 'Gate Leakage': 0.372997, 'Instruction Fetch Unit/Area': 5.86007, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000824659, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold 
Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000824659, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000720848, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000280459, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch 
Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.001337, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00370716, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00781487, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0590479, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0894731, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.69125, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.234878, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch 
Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.303891, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.1902, 'Instruction Fetch Unit/Runtime Dynamic': 0.639764, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932587, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0752046, 'L2/Runtime Dynamic': 0.0190957, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80969, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 4.6102, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.64725, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0351387, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.109127, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.109127, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 5.12762, 'Load Store Unit/Runtime Dynamic': 2.29455, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.269089, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.538177, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 
0.591622, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283406, 'Memory Management Unit/Area': 0.434579, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0955005, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.096578, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00813591, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.353861, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0386585, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.677462, 'Memory Management Unit/Runtime Dynamic': 0.135237, 'Memory Management Unit/Subthreshold Leakage': 0.0769113, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462, 'Peak Dynamic': 25.3178, 'Renaming Unit/Area': 0.369768, 'Renaming Unit/FP Front End RAT/Area': 0.168486, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.521614, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925, 'Renaming Unit/Free List/Area': 0.0414755, 'Renaming Unit/Free List/Gate Leakage': 4.15911e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0401324, 'Renaming Unit/Free List/Runtime Dynamic': 0.0240286, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987, 'Renaming Unit/Gate Leakage': 0.00863632, 'Renaming Unit/Int Front End RAT/Area': 0.114751, 'Renaming 
Unit/Int Front End RAT/Gate Leakage': 0.00038343, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.170742, 'Renaming Unit/Int Front End RAT/Subthreshold
123)) == (addrs.broadcast, 123, *addrs.extra) # yapf: enable # But not if it's true (at least on systems where getaddrinfo works # correctly) if v6 and not gai_without_v4mapped_is_buggy(): sock.setsockopt(tsocket.IPPROTO_IPV6, tsocket.IPV6_V6ONLY, True) with pytest.raises(tsocket.gaierror) as excinfo: await res(("1.2.3.4", 80)) # Windows, macOS expected_errnos = {tsocket.EAI_NONAME} # Linux if hasattr(tsocket, "EAI_ADDRFAMILY"): expected_errnos.add(tsocket.EAI_ADDRFAMILY) assert excinfo.value.errno in expected_errnos # A family where we know nothing about the addresses, so should just # pass them through. This should work on Linux, which is enough to # smoke test the basic functionality... try: netlink_sock = tsocket.socket( family=tsocket.AF_NETLINK, type=tsocket.SOCK_DGRAM ) except (AttributeError, OSError): pass else: assert await getattr(netlink_sock, resolver)("asdf") == "asdf" with pytest.raises(ValueError): await res("1.2.3.4") with pytest.raises(ValueError): await res(("1.2.3.4",)) with pytest.raises(ValueError): if v6: await res(("1.2.3.4", 80, 0, 0, 0)) else: await res(("1.2.3.4", 80, 0, 0)) async def test_SocketType_unresolved_names(): with tsocket.socket() as sock: await sock.bind(("localhost", 0)) assert sock.getsockname()[0] == "127.0.0.1" sock.listen(10) with tsocket.socket() as sock2: await sock2.connect(("localhost", sock.getsockname()[1])) assert sock2.getpeername() == sock.getsockname() # check gaierror propagates out with tsocket.socket() as sock: with pytest.raises(tsocket.gaierror): # definitely not a valid request await sock.bind(("1.2:3", -1)) # This tests all the complicated paths through _nonblocking_helper, using recv # as a stand-in for all the methods that use _nonblocking_helper. 
async def test_SocketType_non_blocking_paths():
    # Exercises every branch of the non-blocking recv path: cancel-before-call,
    # immediate success, immediate failure, block-then-succeed, and
    # block-then-cancel.  Uses a raw stdlib socket `b` as the peer so sends
    # complete synchronously.
    a, b = stdlib_socket.socketpair()
    with a, b:
        ta = tsocket.from_stdlib_socket(a)
        b.setblocking(False)

        # cancel before even calling
        b.send(b"1")
        with _core.CancelScope() as cscope:
            cscope.cancel()
            with assert_checkpoints():
                with pytest.raises(_core.Cancelled):
                    await ta.recv(10)
        # immediate success (also checks that the previous attempt didn't
        # actually read anything)
        with assert_checkpoints():
            # NOTE(review): the comparison result is discarded — presumably an
            # `assert` was intended here; confirm against upstream history.
            await ta.recv(10) == b"1"
        # immediate failure
        with assert_checkpoints():
            with pytest.raises(TypeError):
                await ta.recv("haha")

        # block then succeed
        async def do_successful_blocking_recv():
            with assert_checkpoints():
                assert await ta.recv(10) == b"2"

        async with _core.open_nursery() as nursery:
            nursery.start_soon(do_successful_blocking_recv)
            await wait_all_tasks_blocked()
            b.send(b"2")

        # block then cancelled
        async def do_cancelled_blocking_recv():
            with assert_checkpoints():
                with pytest.raises(_core.Cancelled):
                    await ta.recv(10)

        async with _core.open_nursery() as nursery:
            nursery.start_soon(do_cancelled_blocking_recv)
            await wait_all_tasks_blocked()
            nursery.cancel_scope.cancel()
        # Okay, here's the trickiest one: we want to exercise the path where
        # the task is signaled to wake, goes to recv, but then the recv fails,
        # so it has to go back to sleep and try again.
Strategy: have two # tasks waiting on two sockets (to work around the rule against having # two tasks waiting on the same socket), wake them both up at the same # time, and whichever one runs first "steals" the data from the # other: tb = tsocket.from_stdlib_socket(b) async def t1(): with assert_checkpoints(): assert await ta.recv(1) == b"a" with assert_checkpoints(): assert await tb.recv(1) == b"b" async def t2(): with assert_checkpoints(): assert await tb.recv(1) == b"b" with assert_checkpoints(): assert await ta.recv(1) == b"a" async with _core.open_nursery() as nursery: nursery.start_soon(t1) nursery.start_soon(t2) await wait_all_tasks_blocked() a.send(b"b") b.send(b"a") await wait_all_tasks_blocked() a.send(b"b") b.send(b"a") # This tests the complicated paths through connect async def test_SocketType_connect_paths(): with tsocket.socket() as sock: with pytest.raises(ValueError): # Should be a tuple await sock.connect("localhost") # cancelled before we start with tsocket.socket() as sock: with _core.CancelScope() as cancel_scope: cancel_scope.cancel() with pytest.raises(_core.Cancelled): await sock.connect(("127.0.0.1", 80)) # Cancelled in between the connect() call and the connect completing with _core.CancelScope() as cancel_scope: with tsocket.socket() as sock, tsocket.socket() as listener: await listener.bind(("127.0.0.1", 0)) listener.listen() # Swap in our weird subclass under the trio.socket._SocketType's # nose -- and then swap it back out again before we hit # wait_socket_writable, which insists on a real socket. 
class CancelSocket(stdlib_socket.socket): def connect(self, *args, **kwargs): cancel_scope.cancel() sock._sock = stdlib_socket.fromfd( self.detach(), self.family, self.type ) sock._sock.connect(*args, **kwargs) # If connect *doesn't* raise, then pretend it did raise BlockingIOError # pragma: no cover sock._sock.close() sock._sock = CancelSocket() with assert_checkpoints(): with pytest.raises(_core.Cancelled): await sock.connect(listener.getsockname()) assert sock.fileno() == -1 # Failed connect (hopefully after raising BlockingIOError) with tsocket.socket() as sock: with pytest.raises(OSError): # TCP port 2 is not assigned. Pretty sure nothing will be # listening there. (We used to bind a port and then *not* call # listen() to ensure nothing was listening there, but it turns # out on macOS if you do this it takes 30 seconds for the # connect to fail. Really. Also if you use a non-routable # address. This way fails instantly though. As long as nothing # is listening on port 2.) await sock.connect(("127.0.0.1", 2)) async def test_resolve_remote_address_exception_closes_socket(): # Here we are testing issue 247, any cancellation will leave the socket closed with _core.CancelScope() as cancel_scope: with tsocket.socket() as sock: async def _resolve_remote_address(self, *args, **kwargs): cancel_scope.cancel() await _core.checkpoint() sock._resolve_remote_address = _resolve_remote_address with assert_checkpoints(): with pytest.raises(_core.Cancelled): await sock.connect('') assert sock.fileno() == -1 async def test_send_recv_variants(): a, b = tsocket.socketpair() with a, b: # recv, including with flags assert await a.send(b"x") == 1 assert await b.recv(10, tsocket.MSG_PEEK) == b"x" assert await b.recv(10) == b"x" # recv_into await a.send(b"x") buf = bytearray(10) await b.recv_into(buf) assert buf == b"x" + b"\x00" * 9 if hasattr(a, "sendmsg"): assert await a.sendmsg([b"xxx"], []) == 3 assert await b.recv(10) == b"xxx" a = tsocket.socket(type=tsocket.SOCK_DGRAM) b = 
tsocket.socket(type=tsocket.SOCK_DGRAM) with a, b: await a.bind(("127.0.0.1", 0)) await b.bind(("127.0.0.1", 0)) targets = [b.getsockname(), ("localhost", b.getsockname()[1])] # recvfrom + sendto, with and without names for target in targets: assert await a.sendto(b"xxx", target) == 3 (data, addr) = await b.recvfrom(10) assert data == b"xxx" assert addr == a.getsockname() # sendto + flags # # I can't find any flags that send() accepts... on Linux at least # passing MSG_MORE to send_some on a connected UDP socket seems to # just be ignored. # # But there's no MSG_MORE on Windows or macOS. I guess send_some flags # are really not very useful, but at least this tests them a bit. if hasattr(tsocket, "MSG_MORE"): await a.sendto(b"xxx", tsocket.MSG_MORE, b.getsockname()) await a.sendto(b"yyy", tsocket.MSG_MORE, b.getsockname()) await a.sendto(b"zzz", b.getsockname()) (data, addr) = await b.recvfrom(10) assert data == b"xxxyyyzzz" assert addr == a.getsockname() # recvfrom_into assert await a.sendto(b"xxx", b.getsockname()) == 3 buf = bytearray(10) (nbytes, addr) = await b.recvfrom_into(buf) assert nbytes == 3 assert buf == b"xxx" + b"\x00" * 7 assert addr == a.getsockname() if hasattr(b, "recvmsg"): assert await a.sendto(b"xxx", b.getsockname()) == 3 (data, ancdata, msg_flags, addr) = await b.recvmsg(10) assert data == b"xxx" assert ancdata == [] assert msg_flags == 0 assert addr == a.getsockname() if hasattr(b, "recvmsg_into"): assert await a.sendto(b"xyzw", b.getsockname()) == 4 buf1 = bytearray(2) buf2 = bytearray(3) ret = await b.recvmsg_into([buf1, buf2]) (nbytes, ancdata, msg_flags, addr) = ret assert nbytes == 4 assert buf1 == b"xy" assert buf2 == b"zw" + b"\x00" assert ancdata == [] assert msg_flags == 0 assert addr == a.getsockname() if hasattr(a, "sendmsg"): for target in targets: assert await a.sendmsg([b"x", b"yz"], [], 0, target) == 3 assert await b.recvfrom(10) == (b"xyz", a.getsockname()) a = tsocket.socket(type=tsocket.SOCK_DGRAM) b = 
tsocket.socket(type=tsocket.SOCK_DGRAM) with a, b: await b.bind(("127.0.0.1", 0)) await a.connect(b.getsockname()) # send on a connected udp socket; each call creates a separate # datagram await a.send(b"xxx") await a.send(b"yyy") assert await b.recv(10) == b"xxx" assert await b.recv(10) == b"yyy" async def test_idna(monkeygai): # This is the encoding for "faß.de", which uses one of the characters that # IDNA 2003 handles incorrectly: monkeygai.set("ok faß.de", b"xn--fa-hia.de", 80) monkeygai.set("ok ::1", "::1", 80, flags=_NUMERIC_ONLY) monkeygai.set("ok ::1", b"::1", 80, flags=_NUMERIC_ONLY) # Some things that should not reach the underlying socket.getaddrinfo: monkeygai.set("bad", "fass.de", 80) # We always call socket.getaddrinfo with bytes objects: monkeygai.set("bad", "xn--fa-hia.de", 80) assert "ok ::1" == await tsocket.getaddrinfo("::1", 80) assert "ok ::1" == await tsocket.getaddrinfo(b"::1", 80) assert "ok faß.de" == await tsocket.getaddrinfo("faß.de", 80) assert "ok faß.de" == await tsocket.getaddrinfo("xn--fa-hia.de", 80) assert "ok faß.de" == await tsocket.getaddrinfo(b"xn--fa-hia.de", 80) async def test_getprotobyname(): # These are the constants used in IP header fields, so the numeric values # had *better* be stable across systems... assert await tsocket.getprotobyname("udp") == 17 assert await tsocket.getprotobyname("tcp") == 6 async def test_custom_hostname_resolver(monkeygai): class CustomResolver: async def getaddrinfo(self, host, port, family, type, proto, flags): return ("custom_gai", host, port, family, type, proto, flags) async def getnameinfo(self, sockaddr, flags): return ("custom_gni", sockaddr, flags) cr = CustomResolver() assert tsocket.set_custom_hostname_resolver(cr) is None # Check that the arguments are all getting passed through. # We have to use valid calls to avoid making
from typing import Tuple
import pickle
from highway_env.vehicle.kinematics import Vehicle
from reeds_shepp_curves import reeds_shepp as rs
from reeds_shepp_curves import utils
from PythonRobotics.PathPlanning.RRTStarReedsShepp import rrt_star_reeds_shepp as rrts
import matplotlib.pyplot as plt
import math
from operator import itemgetter
import itertools
from datetime import datetime, timedelta
import random
from openpyxl import Workbook
from gym.envs.registration import register
from gym import GoalEnv
import numpy as np
from numpy.core._multiarray_umath import ndarray
from highway_env.envs.common.abstract import AbstractEnv
from highway_env.envs.common.observation import MultiAgentObservation
from highway_env.road.lane import StraightLane, LineType
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.objects import Landmark
from highway_env.vehicle.objects import Obstacle
from highway_env.moves.move import Move


class ParkingEnv(AbstractEnv, GoalEnv):

    """
    A continuous control environment.

    It implements a reach-type task, where the agent observes their position
    and speed and must control their acceleration and steering so as to reach
    a given goal.

    Credits to <NAME> for the idea and initial implementation.
""" REWARD_WEIGHTS: ndarray = np.array([1, 1, 0, 0, 0.02, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) #1, 0.3, 0, 0, 0.02, 0.02 #px, py, vx, vy, sin(a), cos(a) SUCCESS_GOAL_REWARD: float = 0.10 #0.12 by default STEERING_RANGE: float = np.deg2rad(45) firstFrame = True previousAction = [] currentAction = [] parking_lines_positions = [] path_scaling = 0.1 #RS TURNING RADIUS ADJUSTMENT path_points_gap = 0.1 parkingPaths = {} steering_diff_threshold = 1 #1 by default complete_path_length = 0 last_time_path_index_evolved = datetime.now() lowest_recorded_path_complete_coeficient = 1.0000000001 print_success = True previous_path_long_dist = 0 previous_path_complete_coeficient = 1 previous_path_tangent_velocity = 0 num_of_timesteps_in_episode = 0 sum_distance_to_path = 0 last_100_episodes_success_indicator = [] total_cumulative_reward = 0 episode_counter = 0 workbook = None worksheet = None workbook_file_path = "Sheet/file" workbook_testing = None worksheet_testing = None workbook_testing_file_path = "Sheet/" #"Sheets/Final/Results/dummy.xlsx" def __init__(self) -> None: super().__init__() self.move = None @classmethod def default_config(cls) -> dict: config = super().default_config() config.update({ "observation": { "type": "KinematicsGoal", "features": ['x', 'y', 'vx', 'vy', 'cos_h', 'sin_h',\ 'current_acceleration', 'front_wheels_heading', 'absolute_velocity',\ 'path_dist', 'cos_path_ang_diff', 'sin_path_ang_diff', 'path_long_dist',\ 'next_path_dist', 'cos_next_path_ang_diff', 'sin_next_path_ang_diff', 'next_path_long_dist',\ 'next_next_path_dist', 'cos_next_next_path_ang_diff', 'sin_next_next_path_ang_diff', 'next_next_path_long_dist',\ 'next_next_next_path_dist', 'cos_next_next_next_path_ang_diff', 'sin_next_next_next_path_ang_diff', 'next_next_next_path_long_dist'], "scales": [100, 100, 5, 5, 1, 1, 5, 1, 40, 50, 1, 1, 25, 50, 1, 1, 25, 50, 1, 1, 25, 50, 1, 1, 25], "normalize": False }, "action": { "type": "ContinuousAction" }, 
"simulation_frequency": 15, "policy_frequency": 5, "duration": 300, "screen_width": 1920 * 2, "screen_height": 1080 * 2, "centering_position": [0.5, 0.5], "scaling": 10 * 2*2, "controlled_vehicles": 1, "collision_reward": -0.1, "layoutType": 0, "gridSizeX": 6, "gridSizeY": 2, "gridSpotWidth": 4, "gridSpotLength": 8, "corridorWidth": 9, "orientationMode": 7, #1: FinalGoal // 2: Three phase goal // 3: Step by step path // 4: Path with small gaps // 5 - RS with obstacles // 6 - Proximity to path // 7 - Path Tracking "trackRear": 1, "randomInitialState": 0, "initialPosition": [[20, 0],[20, 1], [20, -1], [19, 0], [19, 1], [19, -1], [-20, 0], [-20, 1], [-20, -1], [-19, 0], [-19, 1], [-19, -1]],#[[20, 0],[-20, 0]],# "initialHeading": 0, "startingPhase": 2, "endingPhase": 3, "obstacles": 1, "otherVehicles": 1, "generateNewPaths": 0, "pathsFileName": "paths_6x2", "randomPath": 0, "goalSpotNumber": 7, "initialPositionNumber": 6 }) return config def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, dict]: if not self.firstFrame: self.previousAction = self.currentAction self.currentAction = action obs, reward, terminal, info = super().step(action) is_it_success = self._is_success(obs['achieved_goal'], obs['desired_goal']) if isinstance(self.observation_type, MultiAgentObservation): if self.config["orientationMode"] == 1 or self.config["orientationMode"] == 6 or self.config["orientationMode"] == 7: success = tuple((self._is_success(agent_obs['achieved_goal'], agent_obs['desired_goal'])) for agent_obs in obs) elif self.config["orientationMode"] == 2: success = tuple((self._is_success(agent_obs['achieved_goal'], agent_obs['desired_goal']) and self.move.phase >= self.config["endingPhase"]) for agent_obs in obs) elif self.config["orientationMode"] == 3 or self.config["orientationMode"] == 4 or self.config["orientationMode"] == 5: success = tuple((self._is_success(agent_obs['achieved_goal'], agent_obs['desired_goal']) and self.move.path_index >= len(self.move.path)) 
for agent_obs in obs) else: if self.config["orientationMode"] == 1 or self.config["orientationMode"] == 6 or self.config["orientationMode"] == 7: success = is_it_success elif self.config["orientationMode"] == 2: success = (is_it_success and self.move.phase >= self.config["endingPhase"]) elif self.config["orientationMode"] == 3 or self.config["orientationMode"] == 4 or self.config["orientationMode"] == 5: success = (is_it_success and self.move.path_index >= len(self.move.path)) info.update({"is_success": success}) if self.config["orientationMode"] == 2: if self.move.phase == 1 and is_it_success: print("PHASE 1 COMPLETE! MOVE TO PHASE 2") self._advance_to_phase_two() elif self.move.phase == 2 and is_it_success: print("PHASE 2 COMPLETE! MOVE TO PHASE 3") self._advance_to_final_phase() elif self.move.phase == 3 and is_it_success: print("PHASE 3 COMPLETE!") elif self.config["orientationMode"] == 3 or self.config["orientationMode"] == 4 or self.config["orientationMode"] == 5: if is_it_success: self.move.path_index += 1 if self.move.path_index < len(self.move.path): self.goal = Landmark(self.road, [self.move.path[self.move.path_index][0], self.move.path[self.move.path_index][1]], heading=self.move.path[self.move.path_index][2]) #CREATE LANDMARK IN GOAL LANE AND SET IS AS THE GOAL self.road.objects.append(self.goal) #ADD OBJECT TO ROAD self.num_of_timesteps_in_episode += 1 self.firstFrame = False return obs, reward, terminal, info def _reset(self): print("RESET") self.total_cumulative_reward = 0 self.workbook_testing = Workbook() self.worksheet_testing = self.workbook_testing.active if self.episode_counter == 0: self.workbook = Workbook() self.worksheet = self.workbook.active else: self.workbook.save(self.workbook_file_path) self.episode_counter += 1 self.print_success = True self.update_steering_diff_threshold() self.previousAction = [] self.currentAction = [] self.firstFrame = True self.last_time_path_index_evolved = datetime.now() 
self.lowest_recorded_path_complete_coeficient = 1.0000000001 self.previous_path_long_dist = 0 self.previous_path_complete_coeficient = 1 self.previous_path_tangent_velocity = 0 self.num_of_timesteps_in_episode = 0 self.sum_distance_to_path = 0 self._create_road(self.config["gridSizeX"], self.config["gridSizeY"]) if self.config["obstacles"] == 1: if self.config["layoutType"] == 1: self._create_obstacles() elif self.config["layoutType"] == 0: self._create_obstacles_classic() if self.config["randomPath"] == 1: goal_spot = self._select_random_goal_spot() else: goal_spot = self.road.network.lanes_list()[self.config["goalSpotNumber"]]#self.np_random.choice(self.road.network.lanes_list()) self._create_vehicles(goal_spot) if self.config["otherVehicles"] == 1: self.create_dummy_parked_vehicles(goal_spot) self._create_move_to_final_goal(goal_spot) filename = self.config["pathsFileName"] if len(self.parkingPaths) == 0 and (self.config["orientationMode"] == 5 or self.config["orientationMode"] == 6 or self.config["orientationMode"] == 7): if self.config["generateNewPaths"] == 1: for initial_position in self.config["initialPosition"]: self.generate_parking_paths(1000, initial_position, self.vehicle.heading) outfile = open(filename,'wb') pickle.dump(self.parkingPaths,outfile) outfile.close() else: infile = open(filename,'rb') self.parkingPaths = pickle.load(infile) infile.close() if self.config["orientationMode"] == 5 or self.config["orientationMode"] == 6 or self.config["orientationMode"] == 7: self.get_path(goal_spot) self.complete_path_length = len(self.move.path) self.create_landmarks_for_path(self.move.path) final_goal_position = goal_spot.position(goal_spot.length/2, 0) self.goal = Landmark(self.road, final_goal_position, heading=goal_spot.heading) #CREATE LANDMARK IN GOAL LANE AND SET IS AS THE GOAL self.road.objects.append(self.goal) if self.config["orientationMode"] == 2: if self.config["startingPhase"] >= 3: self._advance_to_final_phase() elif 
self.config["startingPhase"] >= 2 or self.is_vehicle_close_to_goal_lane(self.vehicle.position, goal_spot): self._advance_to_phase_two() else: self._advance_to_phase_one() elif self.config["orientationMode"] == 3: self.goal = Landmark(self.road, [self.move.path[0][0], self.move.path[0][1]], heading=self.move.path[0][2]) #CREATE LANDMARK IN GOAL LANE AND SET IS AS THE GOAL self.road.objects.append(self.goal) #ADD OBJECT TO ROAD elif self.config["orientationMode"] == 5: self.goal = Landmark(self.road, [self.move.path[0][0], self.move.path[0][1]], heading=self.move.path[0][2]) #CREATE LANDMARK IN GOAL LANE AND SET IS AS THE GOAL self.road.objects.append(self.goal) #ADD OBJECT TO ROAD self.firstFrame = True ##################### '''for point1, point2 in zip(self.move.path, self.move.path[1:]): print(self.calculate_points_distance(point1, point2))''' ##################### def update_steering_diff_threshold(self): if self.steering_diff_threshold > 0.05: self.steering_diff_threshold = 0.9997 * self.steering_diff_threshold #print(self.steering_diff_threshold) #region ThreePhaseGoalManagement def _advance_to_phase_one(self): self.move.phase = 1 self._create_first_phase_goal() self._set_new_immediate_goal(self.move.firstPhaseGoal) print("PHASE 1!") def _advance_to_phase_two(self): self.move.phase = 2 self._create_second_phase_goal() self._set_new_immediate_goal(self.move.secondPhaseGoal) print("PHASE 2!") def _advance_to_final_phase(self): self.move.phase = 3 self._set_new_immediate_goal(self.move.finalGoal) print("PHASE 3!") def calculate_first_phase_goal_location(self): x_offset = self.vehicle.LENGTH*1.5 y_offset = self.vehicle.LENGTH + self.config["corridorWidth"]/4 secondPhaseGoalPosition = self.calculate_second_phase_goal_location() if self.move.is_vehicle_south_of_final_goal(): if self.move.is_vehicle_west_of_final_goal(): return [secondPhaseGoalPosition[0] + x_offset, secondPhaseGoalPosition[1]-y_offset] else: return [secondPhaseGoalPosition[0] - x_offset, 
secondPhaseGoalPosition[1]-y_offset] else: if self.move.is_vehicle_west_of_final_goal(): return [secondPhaseGoalPosition[0] + x_offset, secondPhaseGoalPosition[1] + y_offset] else: return [secondPhaseGoalPosition[0] - x_offset, secondPhaseGoalPosition[1] + y_offset] def calculate_first_phase_goal_heading(self): if self.move.is_vehicle_west_of_final_goal(): return math.pi else: return 0 def calculate_second_phase_goal_location(self): if self.config["layoutType"] == 1: return [self.move.finalGoal.position[0], 0] #only supporting north buffer zone for now else: if self.move.is_vehicle_south_of_final_goal(): return [self.move.finalGoal.position[0], self.config["corridorWidth"]/2] else: return [self.move.finalGoal.position[0], -self.config["corridorWidth"]/2] def _create_first_phase_goal(self) -> None: self.move.firstPhaseGoal = Landmark(self.road, self.calculate_first_phase_goal_location(), heading=self.calculate_first_phase_goal_heading()) def _create_second_phase_goal(self) -> None: self.move.secondPhaseGoal = Landmark(self.road, self.calculate_second_phase_goal_location(), heading=np.pi/2) #CREATE LANDMARK IN GOAL LANE def _set_new_immediate_goal(self, goal: Landmark) -> None: self.goal = goal #SET IT AS THE CURRENT GOAL self.road.objects.append(self.goal) #ADD OBJECT TO ROAD def _create_move_to_final_goal(self, goalLaneSpot: StraightLane) -> None: goalLaneSpot.line_types = [LineType.CONTINUOUS,LineType.CONTINUOUS] #CHANGE GOAL LANE TO CONTINUOUS final_goal_position = goalLaneSpot.position(goalLaneSpot.length/2, 0) self.goal = Landmark(self.road, final_goal_position, heading=goalLaneSpot.heading) #CREATE LANDMARK IN GOAL LANE AND SET IS AS THE GOAL #self.road.objects.append(self.goal) #ADD OBJECT TO ROAD self.move = Move(self.controlled_vehicles[0], self.goal) if self.config["orientationMode"] == 1: self.move.phase = 3 elif self.config["orientationMode"] == 3 or self.config["orientationMode"] == 4: if self.is_vehicle_close_to_goal_lane(self.vehicle.position, 
goalLaneSpot): self.move.phase = 3 self.create_path(final_goal_position, math.degrees(goalLaneSpot.heading)) def get_path(self, goalLaneSpot: StraightLane): if self.config["trackRear"] == 1: half_vehicle_length = self.vehicle.LENGTH / 2 vehicle_rear_position = [self.vehicle.position[0] + half_vehicle_length * math.cos(self.vehicle.heading + math.pi), self.vehicle.position[1] + half_vehicle_length * math.sin(self.vehicle.heading + math.pi)] vehicle_position = tuple(vehicle_rear_position) final_position = [] if goalLaneSpot.position(goalLaneSpot.length/2, 0)[1] > 0: final_position = tuple([goalLaneSpot.position(goalLaneSpot.length/2, 0)[0], goalLaneSpot.position(goalLaneSpot.length/2, 0)[1] - half_vehicle_length]) else: final_position = tuple([goalLaneSpot.position(goalLaneSpot.length/2, 0)[0], goalLaneSpot.position(goalLaneSpot.length/2, 0)[1] + half_vehicle_length]) pair_origin_goal = tuple([vehicle_position, final_position]) self.move.path = self.parkingPaths[pair_origin_goal] self.vehicle.path = self.move.path else: final_position = tuple(goalLaneSpot.position(goalLaneSpot.length/2, 0)) vehicle_position = tuple(self.vehicle.position) pair_origin_goal = tuple([vehicle_position, final_position]) self.move.path = self.parkingPaths[pair_origin_goal]
import io
import socket
import struct
import time
from typing import Dict, Iterable, List, Tuple, Union

from . import types
from .util import get_bits, load_domain_name, load_string, pack_domain_name, pack_string

__all__ = [
    'REQUEST', 'RESPONSE', 'DNSError', 'Record', 'DNSMessage',
    'RData', 'create_rdata', 'load_rdata',
]

# Values of the DNS header QR bit.
REQUEST = 0
RESPONSE = 1
# TTL (in seconds) used when a record carries a negative ttl; see Record.pack.
MAXAGE = 3600000


class DNSError(Exception):
    # Maps DNS RCODE values to human-readable messages.
    errors = {
        1: 'Format error: bad request',
        2: 'Server failure: error occurred',
        3: 'Name error: not exist',
        4: 'Not implemented: query type not supported',
        5: 'Refused: policy reasons'
    }

    def __init__(self, code: int, message: str = None):
        # Fall back to the caller-supplied message, then to a generic one,
        # when `code` is not a known RCODE.
        message = self.errors.get(code, message) or 'Unknown reply code: %d' % code
        super().__init__(message)
        self.code = code


# Registry mapping record type (e.g. types.A) -> RData subclass.
rdata_map = {}


def rdata(cls):
    """Class decorator: register an RData subclass under its `rtype`."""
    rdata_map[cls.rtype] = cls
    return cls


def create_rdata(qtype: int, *k) -> 'RData':
    '''Create RData from parsed data.'''
    # Unknown types fall back to Unsupported_RData, which keeps the raw bytes.
    rcls = rdata_map.get(qtype, Unsupported_RData)
    return rcls(*k)


def load_rdata(qtype: int, data: bytes, l: int, size: int) -> Tuple[int, 'RData']:
    '''Load RData from a byte sequence.

    Returns (new_offset, rdata_instance); `l` is the current offset and
    `size` the RDLENGTH of the record being parsed.
    '''
    rcls = rdata_map.get(qtype)
    if rcls is None:
        # Unsupported_RData.load takes the extra qtype argument.
        return Unsupported_RData.load(data, l, size, qtype)
    return rcls.load(data, l, size)


class RData:
    '''Base class of RData'''
    rtype = -1
    data = None

    def __hash__(self):
        return hash(self.data)

    def __eq__(self, other: 'RData'):
        return self.__class__ == other.__class__ and self.data == other.data

    def __repr__(self):
        return '<%s: %s>' % (self.type_name, self.data)

    @property
    def type_name(self):
        # Lower-cased mnemonic for the record type, e.g. 'a', 'mx'.
        return types.get_name(self.rtype).lower()

    @classmethod
    def load(cls, data: bytes, l: int, size: int):
        # Parse wire-format rdata starting at offset `l`; subclasses implement.
        raise NotImplementedError

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        # Yield wire-format chunks; `names` is the compression-offset map.
        raise NotImplementedError


@rdata
class A_RData(RData):
    '''A record'''
    rtype = types.A

    def __init__(self, data: str):
        self.data = data

    @classmethod
    def load(cls, data: bytes, l: int, size: int):
        # IPv4 address stored as dotted-quad text.
        ip = socket.inet_ntoa(data[l:l + size])
        return l + size, cls(ip)

    def dump(self,
             names: Dict[str, int], offset: int) -> Iterable[bytes]:
        yield socket.inet_aton(self.data)


@rdata
class AAAA_RData(RData):
    '''AAAA record'''
    rtype = types.AAAA

    def __init__(self, data: str):
        self.data = data

    @classmethod
    def load(cls, data: bytes, l: int, size: int):
        # IPv6 address stored as its textual representation.
        ip = socket.inet_ntop(socket.AF_INET6, data[l:l + size])
        return l + size, cls(ip)

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        yield socket.inet_pton(socket.AF_INET6, self.data)


@rdata
class SOA_RData(RData):
    '''Start of Authority record'''
    rtype = types.SOA

    def __init__(self, *k):
        self.data = k
        (
            self.mname,
            self.rname,
            self.serial,
            self.refresh,
            self.retry,
            self.expire,
            self.minimum,
        ) = k

    def __repr__(self):
        return '<%s: %s>' % (self.type_name, self.rname)

    @classmethod
    def load(cls, data: bytes, l: int, size: int) -> Tuple[int, 'SOA_RData']:
        # Two (possibly compressed) domain names followed by five 32-bit ints.
        i, mname = load_domain_name(data, l)
        i, rname = load_domain_name(data, i)
        (
            serial,
            refresh,
            retry,
            expire,
            minimum,
        ) = struct.unpack('!LLLLL', data[i:i + 20])
        return i + 20, cls(mname, rname, serial, refresh, retry, expire, minimum)

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        # `offset + 2` accounts for the RDLENGTH field written by Record.pack
        # before this payload.
        mname = pack_domain_name(self.mname, names, offset + 2)
        yield mname
        yield pack_domain_name(self.rname, names, offset + 2 + len(mname))
        yield struct.pack('!LLLLL', self.serial, self.refresh, self.retry, self.expire, self.minimum)


@rdata
class MX_RData(RData):
    '''Mail exchanger record'''
    rtype = types.MX

    def __init__(self, *k):
        self.data = k
        self.preference, self.exchange = k

    def __repr__(self):
        return '<%s-%s: %s>' % (self.type_name, self.preference, self.exchange)

    @classmethod
    def load(cls, data: bytes, l: int, size: int) -> Tuple[int, 'MX_RData']:
        # 16-bit preference, then the exchange host name.
        preference, = struct.unpack('!H', data[l:l + 2])
        i, exchange = load_domain_name(data, l + 2)
        return i, cls(preference, exchange)

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        yield struct.pack('!H', self.preference)
        yield pack_domain_name(self.exchange, names, offset +
                               4)


@rdata
class SRV_RData(RData):
    '''Service record'''
    rtype = types.SRV

    def __init__(self, *k):
        self.data = k
        self.priority, self.weight, self.port, self.hostname = k

    def __repr__(self):
        return '<%s-%s: %s:%s>' % (self.type_name, self.priority, self.hostname, self.port)

    @classmethod
    def load(cls, data: bytes, l: int, size: int) -> Tuple[int, 'SRV_RData']:
        # Three 16-bit ints (priority, weight, port), then the target host.
        priority, weight, port = struct.unpack('!HHH', data[l:l + 6])
        i, hostname = load_domain_name(data, l + 6)
        return i, cls(priority, weight, port, hostname)

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        yield struct.pack('!HHH', self.priority, self.weight, self.port)
        yield pack_domain_name(self.hostname, names, offset + 8)


@rdata
class NAPTR_RData(RData):
    '''NAPTR record'''
    rtype = types.NAPTR

    def __init__(self, *k):
        self.data = k
        self.order, self.preference, self.flags, self.service, self.regexp, self.replacement = k

    def __repr__(self):
        return '<%s-%s-%s: %s %s %s %s>' % (
            self.type_name, self.order, self.preference, self.flags,
            self.service, self.regexp, self.replacement)

    @classmethod
    def load(cls, data: bytes, l: int, size: int) -> Tuple[int, 'NAPTR_RData']:
        # Two 16-bit ints, then three length-prefixed character-strings
        # (flags, service, regexp), then the replacement domain name.
        pos = l
        order, preference = struct.unpack('!HH', data[pos:pos + 4])
        pos += 4
        length = data[pos]
        pos += 1
        flags = data[pos:pos + length].decode()
        pos += length
        length = data[pos]
        pos += 1
        service = data[pos:pos + length].decode()
        pos += length
        length = data[pos]
        pos += 1
        regexp = data[pos:pos + length].decode()
        pos += length
        i, replacement = load_domain_name(data, pos)
        return i, cls(order, preference, flags, service, regexp, replacement)

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        # Serialization of NAPTR records is not implemented.
        raise NotImplementedError


class Domain_RData(RData):
    '''Domain record

    Shared base for record types whose rdata is a single domain name
    (CNAME, NS, PTR below).
    '''

    def __init__(self, data: str):
        self.data = data

    @classmethod
    def load(cls, data: bytes, l: int, size: int) -> Tuple[int, 'Domain_RData']:
        l, domain = load_domain_name(data, l)
        return l, cls(domain)

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        yield pack_domain_name(self.data, names, offset + 2)


@rdata
class CNAME_RData(Domain_RData):
    '''CNAME record'''
    rtype = types.CNAME


@rdata
class NS_RData(Domain_RData):
    '''NS record'''
    rtype = types.NS


@rdata
class PTR_RData(Domain_RData):
    '''PTR record'''
    rtype = types.PTR


@rdata
class TXT_RData(RData):
    '''TXT record'''
    rtype = types.TXT

    def __init__(self, data: str):
        self.data = data

    @classmethod
    def load(cls, data: bytes, l: int, size: int) -> Tuple[int, 'TXT_RData']:
        # NOTE(review): only the first character-string is loaded, but the
        # full `size` is consumed — confirm multi-string TXT records are not
        # expected here.
        _, text = load_string(data, l)
        return l + size, cls(text.decode())

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        yield pack_string(self.data)


class Unsupported_RData(RData):
    '''Unsupported RData

    Keeps the raw rdata bytes so unknown record types can round-trip.
    '''

    def __init__(self, rtype: int, raw: bytes):
        self.data = rtype, raw
        self.rtype = rtype
        self.raw = raw

    @classmethod
    def load(cls, data: bytes, l: int, size: int, qtype: int) -> Tuple[int, 'Unsupported_RData']:
        return l + size, cls(qtype, data[l:l + size])

    def dump(self, names: Dict[str, int], offset: int) -> Iterable[bytes]:
        yield self.raw


class Record:
    # One DNS question (q == REQUEST) or resource record (q == RESPONSE).

    def __init__(self, q: int = RESPONSE, name: str = '', qtype: int = types.ANY,
                 qclass: int = 1, ttl: int = 0, data: RData = None):
        self.q = q
        self.name = name
        self.qtype = qtype
        self.qclass = qclass
        if q == RESPONSE:
            self.ttl = ttl    # 0 means item should not be cached
            self.data = data
            self.timestamp = int(time.time())

    def __repr__(self):
        if self.q == REQUEST:
            return f'<Record type=request qtype={types.get_name(self.qtype)} name={self.name}>'
        else:
            return f'<Record type=response qtype={types.get_name(self.qtype)} name={self.name} ttl={self.ttl} data={self.data}>'

    def copy(self, **kw):
        # Shallow copy with keyword overrides for any field.
        return Record(q=kw.get('q', self.q),
                      name=kw.get('name', self.name),
                      qtype=kw.get('qtype', self.qtype),
                      qclass=kw.get('qclass', self.qclass),
                      ttl=kw.get('ttl', self.ttl),
                      data=kw.get('data', self.data))

    def parse(self, data: bytes, l: int):
        # Parse one record from wire format at offset `l`; returns the new
        # offset past this record.
        l, self.name = load_domain_name(data, l)
        self.qtype, self.qclass = struct.unpack('!HH', data[l:l +
                                                               4])
        l += 4
        if self.q == RESPONSE:
            # Responses additionally carry TTL, RDLENGTH and the rdata payload.
            self.timestamp = int(time.time())
            self.ttl, dl = struct.unpack('!LH', data[l:l + 6])
            l += 6
            _, self.data = load_rdata(self.qtype, data, l, dl)
            l += dl
        return l

    def pack(self, names, offset=0):
        # Serialize this record to wire format; `names`/`offset` drive
        # domain-name compression relative to the whole message.
        buf = io.BytesIO()
        buf.write(pack_domain_name(self.name, names, offset))
        buf.write(struct.pack('!HH', self.qtype, self.qclass))
        if self.q == RESPONSE:
            if self.ttl < 0:
                # Negative ttl means "cache as long as allowed".
                ttl = MAXAGE
            else:
                # Age the ttl by the time elapsed since it was recorded.
                now = int(time.time())
                self.ttl -= now - self.timestamp
                if self.ttl < 0:
                    self.ttl = 0
                self.timestamp = now
                ttl = self.ttl
            buf.write(struct.pack('!L', ttl))
            data_str = b''.join(self.data.dump(names, offset + buf.tell()))
            # pack_string with '!H' prepends the 16-bit RDLENGTH.
            buf.write(pack_string(data_str, '!H'))
        return buf.getvalue()


class DNSMessage:
    # A whole DNS message: header flags plus question/answer/authority/
    # additional record sections.

    def __init__(self, qr=RESPONSE, qid=0, o=0, aa=0, tc=0, rd=1, ra=1, r=0):
        self.qr = qr      # 0 for request, 1 for response
        self.qid = qid    # id for UDP package
        self.o = o        # opcode: 0 for standard query
        self.aa = aa      # Authoritative Answer
        self.tc = tc      # TrunCation, will be updated on .pack()
        self.rd = rd      # Recursion Desired for request
        self.ra = ra      # Recursion Available for response
        self.r = r        # rcode: 0 for success
        self.qd: List[Record] = []
        self.an: List[Record] = []    # answers
        self.ns: List[Record] = []    # authority records, aka nameservers
        self.ar: List[Record] = []    # additional records

    def __bool__(self):
        # Truthy when the message carries any answers or authority records.
        return any(map(len, (self.an, self.ns)))

    def __getitem__(self, i):
        return self.an[i]

    def __iter__(self):
        return iter(self.an)

    def __repr__(self):
        return '<DNSMessage type=%s qid=%d r=%d QD=%s AN=%s NS=%s AR=%s>' % (
            self.qr, self.qid, self.r, self.qd, self.an, self.ns, self.ar)

    def pack(self, size_limit: int = None):
        # Serialize the whole message; if `size_limit` is given, stop adding
        # records once it would be exceeded and set the TC (truncation) bit.
        z = 0    # NOTE(review): appears unused — confirm before removing.
        names: Dict[str, int] = {}
        buf = io.BytesIO()
        buf.seek(12)    # reserve space for the 12-byte header, written last
        tc = 0
        for group in self.qd, self.an, self.ns, self.ar:
            if tc:
                break
            for rec in group:
                offset = buf.tell()
                brec = rec.pack(names, offset)
                if size_limit is not None and offset + len(brec) > size_limit:
                    tc = 1
                    break
                buf.write(brec)
        self.tc = tc
        buf.seek(0)
        buf.write(
            struct.pack('!HHHHHH',
                        self.qid,
                        (self.qr << 15) + (self.o << 11) + (self.aa << 10) + (self.tc << 9) + (self.rd << 8) +
ops.Graph().as_default() as g: cell_inputs = array_ops.placeholder( dtype, shape=[seq_length, batch_size, input_size]) if direction == CUDNN_RNN_UNIDIRECTION: # outputs is one tensor, states are num_layer tuples, each 2 tensors (outputs, states) = _CreateCudnnCompatibleCanonicalRNN(rnn, cell_inputs) if rnn_mode == CUDNN_LSTM: output_h = array_ops.stack([s.h for s in states]) output_c = array_ops.stack([s.c for s in states]) else: output_state = array_ops.stack([s for s in states]) else: # outputs is one tensor. # states is a tuple of 2 tuples: # each sub tuple is num_layer tuples, each with 2 tensors. (outputs, states) = _CreateCudnnCompatibleCanonicalRNN( rnn, cell_inputs, is_bidi=True) output_state_fw, output_state_bw = states if rnn_mode == CUDNN_LSTM: output_h, output_c = [], [] for s_fw, s_bw in zip(output_state_fw, output_state_bw): output_h.append(array_ops.stack([s_fw.h, s_bw.h])) output_c.append(array_ops.stack([s_fw.c, s_bw.c])) output_h = array_ops.concat(output_h, axis=0) output_c = array_ops.concat(output_c, axis=0) else: output_state = [] for s_fw, s_bw in zip(output_state_fw, output_state_bw): output_state.append(array_ops.stack([s_fw, s_bw])) output_state = array_ops.concat(output_state, axis=0) saver = saver_lib.Saver() with self.test_session(use_gpu=True, graph=g) as sess: saver.restore(sess, save_path) # BlockCell inference if rnn_mode == CUDNN_LSTM: outputs_v, output_h_v, output_c_v = sess.run( [outputs, output_h, output_c], feed_dict={cell_inputs: inference_input}) self.assertAllClose(cudnn_outputs_v, outputs_v) cudnn_output_h_v, cudnn_output_c_v = cudnn_output_states_v self.assertAllClose(cudnn_output_h_v, output_h_v) self.assertAllClose(cudnn_output_c_v, output_c_v) else: outputs_v, output_state_v = sess.run( [outputs, output_state], feed_dict={cell_inputs: inference_input}) self.assertAllClose(cudnn_outputs_v, outputs_v, atol=1e-4, rtol=2e-4) (cudnn_output_h_v,) = cudnn_output_states_v self.assertAllClose(cudnn_output_h_v, output_state_v, 
atol=2e-5, rtol=2e-5) class CudnnRNNTestParamsSize(test_util.TensorFlowTestCase): def _TestOpaqueParamsSize(self, rnn_mode, num_layers, num_units, input_size, dtype, direction): logging.info("Testing one lstm param size with config: %s", locals()) model = CudnnTestModel( rnn_mode, num_layers, num_units, input_size, dtype=dtype, direction=direction) rnn = model.rnn # Min param size estimate = sum(weights.size) + sum(biases.size) min_params_size = ( sum(map(np.prod, rnn.canonical_weight_shapes)) + sum(sp[0] for sp in rnn.canonical_bias_shapes)) opaque_params = rnn.trainable_variables[0] with self.test_session(use_gpu=True, graph=ops.get_default_graph()): variables.global_variables_initializer().run() opaque_params_size_v = opaque_params.eval().size self.assertLessEqual(min_params_size, opaque_params_size_v) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testOpaqueParamsSize(self): test_configs = [ [4, 200, 200], [4, 200, 300], [4, 200, 100], [1, 100, 200], [2, 200, 100], [3, 200, 400], ] directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION] dtype_list = [dtypes.float16, dtypes.float32, dtypes.float64] rnns = [CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_RELU, CUDNN_RNN_TANH] for (rnn, config, dtype, direction) in itertools.product( rnns, test_configs, dtype_list, directions): num_layers, num_units, input_size = config with ops.Graph().as_default(): self._TestOpaqueParamsSize(rnn, num_layers, num_units, input_size, dtype, direction) class CudnnRNNTestTraining(test_util.TensorFlowTestCase): def setUp(self): super(CudnnRNNTestTraining, self).setUp() self._reset_rnd_gen_state = os.environ.get("TF_CUDNN_RESET_RND_GEN_STATE", str(False)) self._rnn_use_v2 = os.environ.get("TF_CUDNN_RNN_USE_V2", "0") def tearDown(self): super(CudnnRNNTestTraining, self).tearDown() os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = self._reset_rnd_gen_state os.environ["TF_CUDNN_RNN_USE_V2"] = self._rnn_use_v2 def _ComputeNumericGrad(self, sess, y, x, 
delta=1e-4, step=1): """Compute the numeric gradient of y wrt to x. Args: sess: The TF session constructed with a graph containing x and y. y: A scalar TF Tensor in the graph constructed in sess. x: A TF Tensor in the graph constructed in sess. delta: Gradient checker's small perturbation of x[i]. step: Only compute numerical gradients for a subset of x values. I.e. dy/dx[i] is computed if i % step == 0. Returns: A Tensor of the same shape and dtype as x. If x[i] is not chosen to compute the numerical gradient dy/x[i], the corresponding value is set to 0. """ x_data = sess.run(x) x_size = x_data.size x_shape = x_data.shape numeric_grad = np.zeros(x_size, dtype=x_data.dtype) for i in range(0, x_size, step): x_pos = x_data.copy() if x_size == 1: x_pos += delta else: x_pos.flat[i] += delta y_pos_feed_dict = dict([(x.name, x_pos)]) y_pos = sess.run(y, feed_dict=y_pos_feed_dict) x_neg = x_data.copy() if x_size == 1: x_neg -= delta else: x_neg.flat[i] -= delta y_neg_feed_dict = dict([(x.name, x_neg)]) y_neg = sess.run(y, feed_dict=y_neg_feed_dict) numeric_grad[i] = (y_pos - y_neg) / (2 * delta) return numeric_grad.reshape(x_shape) def _GetShape(self, sess, inputs): if not isinstance(inputs, collections.Iterable): return sess.run(array_ops.shape(inputs)) else: return sess.run([array_ops.shape(x) for x in inputs]) def _GradientCheckFp16(self, sess, y, xs, num_samples, tolerance=1e-6, delta=1e-4): """Gradient check for Fp16. Fp16 numerical gradients end up being zeros. Use a new way to check gradients: Given multi-variant function: y = f(x1, x2, ... xn) delta_y = f(x1 + delta_x1, x2+delta_x2, ..., xn+delta_xn) - f(x1, x2, ..., xn) = f'(x1) * delta_x1 + f'(x2) * delta_x2 + .. + f'(xn) * delta_xn where: delta_xi are very small disturbance. f'(xi) is the gradient of y w.r.t xi. The gradient check verifies the expected delta_y calculated by the above equation is close to the actual delta_y. Args: sess: tf.compat.v1.Session object. y: output tensor. 
xs: a tensor or a list of input tensors. num_samples: number of test samples to run. tolerance: error tolerance. delta: the order of magnititued of input disturbance to apply to calculate the output change w.r.t inputs. """ sym_grads = self._ComputeSymGrads(sess, y, xs) xs_shapes = self._GetShape(sess, xs) x_vals = [sess.run(x) for x in xs] for _ in range(num_samples): delta_xs = [delta * np.random.rand(*shape.tolist()) for shape in xs_shapes] feed_dict = {} for x, x_val, delta_x in zip(xs, x_vals, delta_xs): feed_dict[x] = x_val + delta_x actual_delta_y = (float(sess.run(y, feed_dict=feed_dict)) - float(sess.run(y))) expected_delta_y = 0. for sym_grad, delta_x in zip(sym_grads, delta_xs): expected_delta_y += np.dot( sym_grad.astype(np.float32).flatten(), delta_x.astype(np.float32).flatten()) self.assertAllClose(expected_delta_y, actual_delta_y, atol=tolerance, rtol=tolerance) def _GradientCheck(self, sess, y, xs, tolerance=1e-6, delta=1e-4): sym_grads = self._ComputeSymGrads(sess, y, xs) num_grads = [self._ComputeNumericGrad(sess, y, x, delta) for x in xs] self.assertEqual(len(sym_grads), len(num_grads)) for x, sym, num in zip(xs, sym_grads, num_grads): logging.info("Comparing gradients for input: %s", x.name) self.assertFalse(np.any(np.isnan(sym))) self.assertFalse(np.any(np.isnan(num))) self.assertAllClose(sym, num, atol=tolerance, rtol=tolerance) def _ComputeSymGrads(self, sess, y, xs): sym_grads_t = gradients.gradients(y, xs) return sess.run(sym_grads_t) def _TestOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size, batch_size, seq_length, dir_count, dropout, dtype, use_v2, delta, tolerance): # Gradient checking runs two forward ops with almost the same input. Need to # make sure the drop patterns across the two runs are the same. 
logging.info("Training test with config: %s", locals()) os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = str(True) np.random.seed(1234) random_seed.set_random_seed(5678) has_input_c = (rnn_mode == CUDNN_LSTM) direction = (CUDNN_RNN_UNIDIRECTION if dir_count == 1 else CUDNN_RNN_BIDIRECTION) if use_v2: os.environ["TF_CUDNN_RNN_USE_V2"] = "1" else: os.environ["TF_CUDNN_RNN_USE_V2"] = "0" model = CudnnTestModel( rnn_mode, num_layers, num_units, input_size, direction=direction, dropout=dropout, dtype=dtype, training=True, bias_initializer=init_ops.random_normal_initializer( mean=1., dtype=dtype)) rnn = model.rnn params = rnn.trainable_variables[0] inputs = variables.Variable( random_ops.random_uniform([seq_length, batch_size, input_size], dtype=dtype), dtype=dtype).read_value() input_h = variables.Variable( random_ops.random_uniform( [num_layers * dir_count, batch_size, num_units], dtype=dtype), dtype=dtype).read_value() if has_input_c: input_c = variables.Variable( random_ops.random_uniform( [num_layers * dir_count, batch_size, num_units], dtype=dtype), dtype=dtype).read_value() initial_state = (input_h, input_c) else: initial_state = (input_h,) total_sum = model.FProp(inputs, initial_state, training=True) with self.test_session(use_gpu=True, graph=ops.get_default_graph()) as sess: sess.run(variables.global_variables_initializer()) all_inputs = [inputs, params] for s in initial_state: all_inputs.append(s) if dtype == dtypes.float16: self._GradientCheckFp16( sess, total_sum, all_inputs, num_samples=FLAGS.grad_check_num_samples, tolerance=tolerance, delta=delta) else: for _ in range(FLAGS.grad_check_num_samples): # Each time choose a different set of inputs. sess.run(variables.global_variables_initializer()) self._GradientCheck( sess, total_sum, all_inputs, tolerance=tolerance, delta=delta) def _TestSimpleTrainingHelper(self, rnn_mode, test_configs): dropouts = [0, 0.5, 1.] 
v2_options = [False, True] for config, dropout, use_v2 in itertools.product(test_configs, dropouts, v2_options): dtype = config.get("dtype", dtypes.float32) delta = config.get("delta", 1e-4) tolerance = config.get("tolerance", 1e-6) dir_count = config.get("dir_count", 1) shape = config["shape"] if dtype == dtypes.float64: # TODO(jamesqin): b/117848763 use_v2 = False with ops.Graph().as_default(): self._TestOneSimpleTraining( rnn_mode, shape["num_layers"], shape["num_units"], shape["input_size"], shape["batch_size"], shape["seq_length"], dir_count, dropout, dtype, use_v2, delta, tolerance) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSimpleTrainingLSTMFp64(self): test_configs = [ { "dtype": dtypes.float64, "tolerance": 5e-6, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, }, }, ] self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSimpleTrainingLSTMFp32(self): test_configs = [ { "dtype": dtypes.float32, "delta": 1e-4, "tolerance": 9e-2, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, }, }, ] self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSimpleTrainingLSTMFp16(self): test_configs = [ { "dtype": dtypes.float16, "delta": 1e-3, "tolerance": 9e-2, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, }, }, { "dtype": dtypes.float16, "delta": 1e-2, "tolerance": 9e-2, "shape": { "num_layers": 2, "num_units": 6, "input_size": 8, "batch_size": 6, "seq_length": 4, }, }, ] self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSimpleTrainingGRUFp64(self): test_configs = [ { 
"dtype": dtypes.float64, "tolerance": 5e-6, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, } }, ] self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSimpleTrainingGRUFp32(self): test_configs = [ { "dtype": dtypes.float32, "delta": 1e-3, "tolerance": 4e-3, "shape": { "num_layers": 2, "num_units": 3, "input_size": 4, "batch_size": 3, "seq_length": 4, }, }, ] self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testSimpleTrainingGRUFp16(self): test_configs = [ { "dtype": dtypes.float16,
<gh_stars>100-1000 import os import pytest import mbuild as mb import mbuild.formats.gomc_conf_writer as gomc_control from mbuild.formats.charmm_writer import Charmm from mbuild.lattice import load_cif from mbuild.tests.base_test import BaseTest from mbuild.utils.io import get_fn, has_foyer @pytest.mark.skipif(not has_foyer, reason="Foyer package not installed") class TestGOMCControlFileWriter(BaseTest): def test_dict_keys_to_list( self, ): dict = {"a": "1", "b": "2", "c": "3"} keys = gomc_control.dict_keys_to_list(dict) assert keys == ["a", "b", "c"] def test_get_required_data(self): value = gomc_control._get_required_data(description=False) assert ( value.sort() == [ "charmm_object", "ensemble_type", "RunSteps", "Temperature", "ff_psf_pdb_file_directory", "check_input_files_exist", "Restart", "RestartCheckpoint", "Parameters", "Coordinates_box_0", "override_psf_box_0", "Coordinates_box_1", "Structure_box_1", "binCoordinates_box_0", "extendedSystem_box_0", "binVelocities_box_0", "binCoordinates_box_1", "extendedSystem_box_1", "binVelocities_box_1", ].sort() ) value = gomc_control._get_required_data(description=True) assert ( gomc_control.dict_keys_to_list(value).sort() == [ "charmm_object", "ensemble_type", "RunSteps", "Temperature", "ff_psf_pdb_file_directory", "Restart", "RestartCheckpoint", "ExpertMode", "check_input_files_exist", "Parameters", "Coordinate_box_0", "Structure_box_0", "Coordinate_box_1", "Structure_box_1", "binCoordinates_box_0", "extendedSystem_box_0", "binVelocities_box_0", "binCoordinates_box_1", "extendedSystem_box_1", "binVelocities_box_1", ].sort() ) def test_get_all_possible_input_variable(self): value = gomc_control._get_all_possible_input_variables( description=False ) assert ( value.sort() == [ "PRNG", "ParaTypeCHARMM", "ParaTypeMie", "ParaTypeMARTINI", "RcutCoulomb_box_0", "RcutCoulomb_box_1", "Pressure", "Rcut", "RcutLow", "LRC", "Exclude", "Potential", "Rswitch", "ElectroStatic", "Ewald", "CachedFourier", "Tolerance", "Dielectric", 
"PressureCalc", "EqSteps", "AdjSteps", "VDWGeometricSigma", "useConstantArea", "FixVolBox0", "ChemPot", "Fugacity", "CBMC_First", "CBMC_Nth", "CBMC_Ang", "CBMC_Dih", "OutputName", "CoordinatesFreq", "DCDFreq", "RestartFreq", "CheckpointFreq", "ConsoleFreq", "BlockAverageFreq", "HistogramFreq", "DistName", "HistName", "RunNumber", "RunLetter", "SampleFreq", "OutEnergy", "OutPressure", "OutMolNum", "OutDensity", "OutVolume", "OutSurfaceTension", "FreeEnergyCalc", "MoleculeType", "InitialState", "LambdaVDW", "LambdaCoulomb", "ScaleCoulomb", "ScalePower", "ScaleAlpha", "MinSigma", "DisFreq", "RotFreq", "IntraSwapFreq", "SwapFreq", "RegrowthFreq", "CrankShaftFreq", "VolFreq", "MultiParticleFreq", "IntraMEMC-1Freq", "MEMC-1Freq", "IntraMEMC-2Freq", "MEMC-2Freq", "IntraMEMC-3Freq", "MEMC-3Freq", "ExchangeVolumeDim", "MEMC_DataInput", ].sort() ) value = gomc_control._get_all_possible_input_variables(description=True) assert ( gomc_control.dict_keys_to_list(value).sort() == [ "PRNG", "ParaTypeCHARMM", "ParaTypeMie", "ParaTypeMARTINI", "RcutCoulomb_box_0", "RcutCoulomb_box_1", "Pressure", "Rcut", "RcutLow", "LRC", "Exclude", "Potential", "Rswitch", "ElectroStatic", "Ewald", "CachedFourier", "Tolerance", "Dielectric", "PressureCalc", "EqSteps", "AdjSteps", "VDWGeometricSigma", "useConstantArea", "FixVolBox0", "ChemPot", "Fugacity", "CBMC_First", "CBMC_Nth", "CBMC_Ang", "CBMC_Dih", "OutputName", "CoordinatesFreq", "DCDFreq", "RestartFreq", "CheckpointFreq", "ConsoleFreq", "BlockAverageFreq", "HistogramFreq", "DistName", "HistName", "RunNumber", "RunLetter", "SampleFreq", "OutEnergy", "OutPressure", "OutMolNum", "OutDensity", "OutVolume", "OutSurfaceTension", "FreeEnergyCalc", "MoleculeType", "InitialState", "LambdaVDW", "LambdaCoulomb", "ScaleCoulomb", "ScalePower", "ScaleAlpha", "MinSigma", "DisFreq", "RotFreq", "IntraSwapFreq", "SwapFreq", "RegrowthFreq", "CrankShaftFreq", "VolFreq", "MultiParticleFreq", "IntraMEMC-1Freq", "MEMC-1Freq", "IntraMEMC-2Freq", "MEMC-2Freq", 
"IntraMEMC-3Freq", "MEMC-3Freq", "ExchangeVolumeDim", "MEMC_DataInput", ].sort() ) def test_get_default_variables_dict(self): value = gomc_control._get_default_variables_dict() assert ( gomc_control.dict_keys_to_list(value).sort() == [ "PRNG", "ParaTypeCHARMM", "ParaTypeMie", "ParaTypeMARTINI", "RcutCoulomb_box_0", "RcutCoulomb_box_1", "Pressure", "Rcut", "RcutLow", "LRC", "Exclude", "coul_1_4_scaling", "Potential", "Rswitch", "ElectroStatic", "Ewald", "CachedFourier", "Tolerance", "Dielectric", "PressureCalc", "EqSteps", "AdjSteps", "VDWGeometricSigma", "useConstantArea", "FixVolBox0", "ChemPot", "Fugacity", "CBMC_First", "CBMC_Nth", "CBMC_Ang", "CBMC_Dih", "OutputName", "CoordinatesFreq", "DCDFreq", "RestartFreq", "CheckpointFreq", "ConsoleFreq", "BlockAverageFreq", "HistogramFreq", "DistName", "HistName", "RunNumber", "RunLetter", "SampleFreq", "OutEnergy", "OutPressure", "OutMolNum", "OutDensity", "OutVolume", "OutSurfaceTension", "FreeEnergyCalc", "MoleculeType", "InitialState", "LambdaVDW", "LambdaCoulomb", "ScaleCoulomb", "ScalePower", "ScaleAlpha", "MinSigma", "ExchangeVolumeDim", "MEMC_DataInput", "DisFreq", "RotFreq", "IntraSwapFreq", "SwapFreq", "RegrowthFreq", "CrankShaftFreq", "VolFreq", "MultiParticleFreq", "IntraMEMC-1Freq", "MEMC-1Freq", "IntraMEMC-2Freq", "MEMC-2Freq", "IntraMEMC-3Freq", "MEMC-3Freq", ].sort() ) def test_print_ensemble_info(self): try: gomc_control.print_required_input(description=True) gomc_control.print_required_input(description=False) test_status = "PASSED" except: test_status = "FAILED" assert test_status == "PASSED" try: gomc_control.print_valid_ensemble_input_variables( "NVT", description=True ) gomc_control.print_valid_ensemble_input_variables( "NVT", description=False ) test_status = "PASSED" except: test_status = "FAILED" assert test_status == "PASSED" try: gomc_control.print_valid_ensemble_input_variables( "NPT", description=True ) gomc_control.print_valid_ensemble_input_variables( "NPT", description=False ) test_status 
= "PASSED" except: test_status = "FAILED" assert test_status == "PASSED" try: gomc_control.print_valid_ensemble_input_variables( "GEMC_NVT", description=True ) gomc_control.print_valid_ensemble_input_variables( "GEMC_NVT", description=False ) test_status = "PASSED" except: test_status = "FAILED" assert test_status == "PASSED" try: gomc_control.print_valid_ensemble_input_variables( "GEMC_NPT", description=True ) gomc_control.print_valid_ensemble_input_variables( "GEMC_NPT", description=False ) test_status = "PASSED" except: test_status = "FAILED" assert test_status == "PASSED" try: gomc_control.print_valid_ensemble_input_variables( "GCMC", description=True ) gomc_control.print_valid_ensemble_input_variables( "GCMC", description=False ) test_status = "PASSED" except: test_status = "FAILED" assert test_status == "PASSED" try: gomc_control.print_valid_ensemble_input_variables( "XXXXX", description=True ) gomc_control.print_valid_ensemble_input_variables( "XXXXX", description=False ) test_status = "PASSED" except: test_status = "FAILED" assert test_status == "FAILED" def test_get_possible_ensemble_input_variables(self): with pytest.warns( UserWarning, match="WARNING: The ensemble_type selected for " "the _get_possible_ensemble_input_variables " "function is not valid.", ): gomc_control._get_possible_ensemble_input_variables("XXX") def test_wrong_ensemble_gomccontrol(self, ethane_gomc): test_box_ethane_gomc = mb.fill_box( compound=[ethane_gomc], n_compounds=[1], box=[1, 1, 1] ) charmm = Charmm( test_box_ethane_gomc, "ethane_box_0", structure_box_1=None, filename_box_1=None, ff_filename="ethane_FF", residues=[ethane_gomc.name], forcefield_selection="oplsaa", ) ensemble_input = "XXXXX" with pytest.raises( ValueError, match=r"ERROR: The ensemble type selection of '{}' is not a valid ensemble option. 
" r"Please choose the 'NPT', 'NVT', 'GEMC_NVT', 'GEMC_NPT', or 'GCMC' " "ensembles".format(ensemble_input), ): gomc_control.write_gomc_control_file( charmm, "test_wrong_ensemble_gomccontrol", ensemble_input, 100, 300, check_input_files_exist=False, ) def test_charmm_ff_name_is_none(self, ethane_gomc): test_box_ethane_gomc = mb.fill_box( compound=[ethane_gomc], n_compounds=[1], box=[1, 1, 1] ) charmm = Charmm( test_box_ethane_gomc, "ethane_box_0", structure_box_1=None, filename_box_1=None, ff_filename=None, residues=[ethane_gomc.name], forcefield_selection="oplsaa", ) with pytest.raises( ValueError, match=r"The force field file name was not specified and in the Charmm object \({}\)." r"Therefore, the force field file \(.inp\) can not be written, and thus, the " r"GOMC control file \(.conf\) can not be created. Please use the force field file " r"name when building the Charmm object".format(type(Charmm)), ): gomc_control.write_gomc_control_file( charmm, "test_charmm_ff_name_is_none", "NVT", 100, 300, check_input_files_exist=False, ) def test_input_variables_dict_wrong_value(self, ethane_gomc): test_box_ethane_gomc = mb.fill_box( compound=[ethane_gomc], n_compounds=[1], box=[1, 1, 1] ) charmm = Charmm( test_box_ethane_gomc, "ethane_box_0", structure_box_1=None, filename_box_1=None, ff_filename="ethane_FF", residues=[ethane_gomc.name], forcefield_selection="oplsaa", ) with pytest.raises( ValueError, match=r"ERROR: The input_variables_dict variable is not None or a dictionary.", ): gomc_control.write_gomc_control_file( charmm, "test_input_variables_dict_wrong_value", "NVT", 100, 300, check_input_files_exist=False, input_variables_dict="XXXXX", ) def test_not_entered_charmm_object(self): not_charmm_object = "XXXXX" with pytest.raises( TypeError, match=r"ERROR: The variable supplied is a \({}\), not a charmm_object \({}\)" r"".format(type(not_charmm_object), type(Charmm)), ): gomc_control.write_gomc_control_file( not_charmm_object, "test_not_charmm_object", "NVT", 100, 
300, check_input_files_exist=False, ) def test_save_basic_NVT(self, ethane_gomc): test_box_ethane_gomc = mb.fill_box( compound=[ethane_gomc], n_compounds=[1], box=[1, 1, 1] ) charmm = Charmm( test_box_ethane_gomc, "ethane", ff_filename="ethane", residues=[ethane_gomc.name], forcefield_selection="oplsaa", ) charmm.write_inp() charmm.write_psf() charmm.write_pdb() gomc_control.write_gomc_control_file( charmm, "test_save_basic_NVT.conf", "NVT", 10, 300, check_input_files_exist=True, Restart=False, ) with open("test_save_basic_NVT.conf", "r") as fp: variables_read_dict = { "Restart": False, "ExpertMode": False, "PRNG": False, "ParaTypeCHARMM": False, "Parameters": False, "Coordinates": False, "Structure": False, "Temperature": False, "Potential": False, "LRC": False, "Rcut": False, "RcutLow": False, "VDWGeometricSigma": False, "Exclude": False, "Ewald": False, "ElectroStatic": False, "CachedFourier": False, "Tolerance": False, "1-4scaling": False, "PressureCalc": False, "RunSteps": False, "EqSteps": False, "AdjSteps": False, "DisFreq": False, "RotFreq": False, "IntraSwapFreq": False, "SwapFreq": False, "RegrowthFreq": False, "CrankShaftFreq": False, "VolFreq": False, "MultiParticleFreq": False, "IntraMEMC-1Freq": False, "MEMC-1Freq": False, "IntraMEMC-2Freq": False, "MEMC-2Freq": False, "IntraMEMC-3Freq": False, "MEMC-3Freq": False, "CellBasisVector1": False, "CellBasisVector2": False, "CellBasisVector3": False, "CBMC_First": False, "CBMC_Nth": False, "CBMC_Ang": False, "CBMC_Dih": False, "OutputName": False, "RestartFreq": False, "CheckpointFreq": False, "CoordinatesFreq": False, "ConsoleFreq": False, "BlockAverageFreq": False, "HistogramFreq": False, "DistName": False, "HistName": False, "RunNumber": False, "RunLetter": False, "SampleFreq": False, "OutEnergy": False, "OutPressure": False, "OutMolNum": False, "OutDensity": False, "OutVolume": False, "OutSurfaceTension": False, } out_gomc = fp.readlines() for i, line in enumerate(out_gomc): if line.startswith("Restart 
"): variables_read_dict["Restart"] = True split_line = line.split() assert split_line[1] == "False" elif line.startswith("ExpertMode"): variables_read_dict["ExpertMode"] = True split_line = line.split() assert split_line[1] == "False" elif line.startswith("PRNG "): variables_read_dict["PRNG"] = True split_line = line.split() assert split_line[1] == "RANDOM" elif line.startswith("ParaTypeCHARMM "): variables_read_dict["ParaTypeCHARMM"] = True split_line = line.split() assert split_line[1] == "True" elif line.startswith("Parameters "): variables_read_dict["Parameters"] = True split_line = line.split() assert split_line[1] == "ethane.inp" elif line.startswith("Coordinates "): variables_read_dict["Coordinates"] = True split_line = line.split() assert split_line[1] == "0" assert split_line[2] == "ethane.pdb" elif line.startswith("Structure "): variables_read_dict["Structure"] = True split_line = line.split() assert split_line[1] == "0" assert split_line[2] == "ethane.psf" elif line.startswith("Temperature "): variables_read_dict["Temperature"] = True split_line = line.split() assert split_line[1] == "300" elif line.startswith("Potential "): variables_read_dict["Potential"] = True split_line = line.split() assert split_line[1] == "VDW" elif line.startswith("LRC "): variables_read_dict["LRC"] = True split_line = line.split() assert split_line[1] == "True" elif line.startswith("Rcut "): variables_read_dict["Rcut"] = True split_line = line.split() assert split_line[1] == "10" elif line.startswith("RcutLow "): variables_read_dict["RcutLow"] = True split_line = line.split() assert split_line[1] == "1" elif line.startswith("VDWGeometricSigma "): variables_read_dict["VDWGeometricSigma"] = True split_line = line.split() assert split_line[1] == "False" elif line.startswith("Exclude "): variables_read_dict["Exclude"] = True split_line = line.split() assert split_line[1] == "1-3" elif line.startswith("Ewald "): variables_read_dict["Ewald"] = True split_line = line.split() assert 
split_line[1] == "True" elif line.startswith("ElectroStatic "): variables_read_dict["ElectroStatic"] = True split_line = line.split() assert split_line[1] == "True" elif line.startswith("CachedFourier "): variables_read_dict["CachedFourier"] = True split_line = line.split() assert split_line[1] == "False" elif line.startswith("Tolerance "): variables_read_dict["Tolerance"] = True split_line = line.split() assert split_line[1] == "1e-05" elif line.startswith("1-4scaling "): variables_read_dict["1-4scaling"] = True split_line = line.split() assert split_line[1] == "0.5" elif line.startswith("PressureCalc "): variables_read_dict["PressureCalc"] = True split_line = line.split() assert split_line[1] == "True" assert split_line[2] == "1" elif line.startswith("RunSteps "): variables_read_dict["RunSteps"] = True split_line = line.split() assert split_line[1] == "10" elif line.startswith("EqSteps "): variables_read_dict["EqSteps"] = True split_line = line.split() assert split_line[1] == "1" elif line.startswith("AdjSteps "): variables_read_dict["AdjSteps"] = True split_line = line.split() assert split_line[1] == "1" elif line.startswith("DisFreq "): variables_read_dict["DisFreq"] = True split_line = line.split() assert split_line[1] == "0.15" elif line.startswith("RotFreq "): variables_read_dict["RotFreq"] = True split_line = line.split() assert split_line[1] == "0.15" elif line.startswith("IntraSwapFreq "): variables_read_dict["IntraSwapFreq"] = True split_line = line.split() assert split_line[1] == "0.3" elif line.startswith("SwapFreq "): variables_read_dict["SwapFreq"] = True split_line = line.split()
<filename>io_scene_halo/file_wrl/build_scene.py # ##### BEGIN MIT LICENSE BLOCK ##### # # MIT License # # Copyright (c) 2022 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # ##### END MIT LICENSE BLOCK ##### import re import bpy import bmesh from .format import Object from mathutils import Vector from ..global_functions import mesh_processing, global_functions def infer_error_type(binding_type, mtl_diffuse_colors): ''' Infer the type of error based on color used by Tool ''' # thanks to dt192 for this trick! 
# ---------------------------------------------------------------------------
# Halo WRL error-geometry import helpers (Blender add-on).
# NOTE(review): this chunk begins mid-function — the `def` line of the error
# classifier below (taking `binding_type` and `mtl_diffuse_colors`) precedes
# this view.  Source formatting was collapsed; indentation reconstructed.
# ---------------------------------------------------------------------------

# Diffuse colour strings emitted by tool.exe into WRL error files, keyed by
# "R G B" with six decimal places, mapped to human-readable colour names.
color_names = {
    "1.000000 0.000000 0.000000": "red",
    "0.000000 1.000000 0.000000": "green",
    "1.000000 0.500000 0.000000": "orange",
    "0.000000 1.000000 1.000000": "cyan",
    "1.000000 1.000000 0.000000": "yellow",
    "1.000000 0.000000 1.000000": "magenta",
    "0.000000 0.000000 0.000000": "black",
    "0.000000 0.000000 1.000000": "blue",
    # unconfirmed values:
    }

# Default when no material colours were found at all.
color_info = " (white)"
if mtl_diffuse_colors:
    found_colors = set()
    for color in mtl_diffuse_colors:
        color_name = color_names.get(color)
        found_colors.add(color_name)

    color_info = " (" + ", ".join(sorted(found_colors)) + ")"

    # Colour meaning depends on whether colours were bound per face or per
    # vertex in the WRL file; the quoted "### WARNING/ERROR" comments are the
    # original tool.exe messages each colour corresponds to.
    if binding_type == "PER_FACE":
        ### WARNING found nearly coplanar surfaces (red and green).
        if "red" in found_colors and "green" in found_colors:
            return "nearly coplanar surfaces" + color_info
        ### WARNING found #1 degenerate triangles.
        ### ERROR found z buffered triangles (red).
        if "red" in found_colors:
            return "degenerate or z-buffered triangle" + color_info
        ### WARNING: portal outside the bsp. [see magenta in error geometry]
        if "magenta" in found_colors:
            return "portal outside BSP" + color_info
    elif binding_type == "PER_VERTEX":
        ### ERROR edge #%d is open (red)
        ### ERROR couldn't update edge #%d (red)
        ### ERROR edge #%d is too short (red)
        # edge has more than four triangles (see red in error geometry)
        if "red" in found_colors:
            return "bad edge" + color_info
        ### WARNING unearthed edge (magenta boxed lines)
        ### WARNING found possible T-junction (pink).
        if "magenta" in found_colors:
            return "unearthed edge or T-junction" + color_info
    # Binding-independent colour meanings:
    ### WARNING: a surface clipped to no leaves (see cyan in error geometry)
    if "cyan" in found_colors:
        return "surface clipped to no leaves" + color_info
    ### WARNING: portal doesn't divide any space (it may be coincident with seam sealer?). [see green in error geometry]
    if "green" in found_colors:
        return "portal does not divide space" + color_info
    ### ERROR: portal does not define two closed spaces. (see yellow in error geometry)
    if "yellow" in found_colors:
        return "portal does not define two closed spaces" + color_info
    ### WARNING: found duplicate triangle building connected geometry. YOU SHOULD FIX THIS. (see orange in error geometry)
    ### ERROR couldn't build bsp because of overlapping surfaces (orange)
    if "orange" in found_colors:
        return "duplicate triangle or overlapping surface" + color_info
    #two fog planes intersected in a cluster (see black in error geometry).
    if "black" in found_colors:
        return "two fog planes intersected in a cluster" + color_info
    #degenerate triangle [or triangle with bad uvs] (see blue in error geometry)
    if "blue" in found_colors:
        return "degenerate triangle or UVs" + color_info

return "unknown" + color_info

def get_material_name(diffuse, error_type):
    # Refine the generic error label with the concrete colour for the one
    # error type that uses two colours (red/green coplanar-surface pair).
    # `diffuse` is an RGBA 4-tuple of floats.
    mat_name = error_type
    if error_type == "nearly coplanar surfaces (green, red)":
        if diffuse == (1.0, 0.0, 0.0, 1.0):
            mat_name = "nearly coplanar surfaces (red)"
        elif diffuse == (0.0, 1.0, 0.0, 1.0):
            mat_name = "nearly coplanar surfaces (green)"

    return mat_name

def set_object_properties(context, object):
    # Select the freshly built object, show its name in the viewport and
    # recentre its origin on the geometry median, then deselect again.
    mesh_processing.deselect_objects(context)
    mesh_processing.select_object(context, object)
    object.show_name = True
    bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
    mesh_processing.deselect_objects(context)

def build_object_list_old(WRL):
    # Parse a VRML 1.0 style WRL tree: one Object per root node, collecting
    # points (Coordinate3), quads (IndexedFaceSet, 4 indices per face),
    # line segments (IndexedLineSet, 3 indices per edge), the material
    # binding mode and the per-face/per-vertex diffuse colours.
    # Returns (list of distinct error strings, list of Object).
    error_list = []
    object_list = []
    for root_node_idx, root_node in enumerate(WRL.nodes):
        object = Object()
        error = ""
        diffuse_nodes = []
        material_binding = ""
        faces = []
        edges = []
        points = []
        for child_node in root_node.child_nodes:
            if child_node.header == "Coordinate3":
                for content_node in child_node.child_nodes:
                    point_list = []
                    for content in content_node.content.split(","):
                        for set in content.split(" "):
                            value = set.strip()
                            if not global_functions.string_empty_check(value):
                                point_list.append(value)

                    # Values come in flat X Y Z triples.
                    for point_idx in range(int(len(point_list) / 3)):
                        set_idx = 3 * point_idx
                        points.append(Vector((float(point_list[set_idx]), float(point_list[set_idx + 1]), float(point_list[set_idx + 2]))))

            elif child_node.header == "IndexedFaceSet":
                for content_node in child_node.child_nodes:
                    face_list = []
                    for content in content_node.content.split(","):
                        value = content.strip()
                        if not global_functions.string_empty_check(value):
                            face_list.append(value)

                    # Four indices per face entry (last one is typically the
                    # -1 terminator — filtered out later in build_scene).
                    for face_idx in range(int(len(face_list) / 4)):
                        set_idx = 4 * face_idx
                        v0 = face_list[set_idx]
                        v1 = face_list[set_idx + 1]
                        v2 = face_list[set_idx + 2]
                        v3 = face_list[set_idx + 3]
                        faces.append((int(v0), int(v1), int(v2), int(v3)))

            elif child_node.header == "IndexedLineSet":
                for content_node in child_node.child_nodes:
                    edge_list = []
                    for content in content_node.content.split(","):
                        value = content.strip()
                        if not global_functions.string_empty_check(value):
                            edge_list.append(value)

                    # Three indices per edge entry (third is the terminator).
                    for edge_idx in range(int(len(edge_list) / 3)):
                        set_idx = 3 * edge_idx
                        v0 = edge_list[set_idx]
                        v1 = edge_list[set_idx + 1]
                        v2 = edge_list[set_idx + 2]
                        edges.append((int(v0), int(v1), int(v2)))

            elif child_node.header == "MaterialBinding":
                value = child_node.content.split(" ")[1]
                material_binding = value

            elif child_node.header == "Material":
                for content_node in child_node.child_nodes:
                    if content_node.header == "diffuseColor":
                        for content in content_node.content.split(","):
                            value = content.strip()
                            if not global_functions.string_empty_check(value):
                                diffuse_nodes.append(value)

        error = infer_error_type(material_binding, diffuse_nodes)
        if not error in error_list:
            error_list.append(error)

        object.error = error
        object.diffuse_nodes = diffuse_nodes
        object.material_binding = material_binding
        object.faces = faces
        object.edges = edges
        object.points = points
        object_list.append(object)

    return error_list, object_list

def build_object_list_new(WRL):
    # Parse a VRML 2.0 style WRL tree.  The error description is embedded in
    # quotes in the geometry header; node content is pre-tokenised here
    # (indexed directly rather than split).  Same return shape as the old
    # parser: (list of distinct error strings, list of Object).
    error_list = []
    object_list = []
    for root_node_idx, root_node in enumerate(WRL.nodes):
        object = Object()
        error = ""
        type = ""
        diffuse_nodes = []
        material_binding = ""
        faces = []
        edges = []
        points = []
        for child_node in root_node.child_nodes:
            geometry_header_list = child_node.header.split()
            # Error name is the quoted string in the header; geometry kind
            # (IndexedFaceSet / IndexedLineSet) is the header's last token.
            error = re.findall(r'"([^"]*)"', child_node.header)[0]
            type = geometry_header_list[-1]
            material_binding = bool(child_node.content.split(" ")[1])
            for content_node in child_node.child_nodes:
                if content_node.header == "coord Coordinate":
                    for value_node in content_node.child_nodes:
                        if value_node.header == "point":
                            for point_idx in range(int(len(value_node.content) / 3)):
                                set_idx = 3 * point_idx
                                points.append(Vector((float(value_node.content[set_idx]), float(value_node.content[set_idx + 1]), float(value_node.content[set_idx + 2]))))

                        elif value_node.header == "coordIndex":
                            if type == "IndexedFaceSet":
                                for face_idx in range(int(len(value_node.content) / 4)):
                                    set_idx = 4 * face_idx
                                    v0 = value_node.content[set_idx]
                                    v1 = value_node.content[set_idx + 1]
                                    v2 = value_node.content[set_idx + 2]
                                    v3 = value_node.content[set_idx + 3]
                                    faces.append((int(v0), int(v1), int(v2), int(v3)))

                            elif type == "IndexedLineSet":
                                for edge_idx in range(int(len(value_node.content) / 3)):
                                    set_idx = 3 * edge_idx
                                    v0 = value_node.content[set_idx]
                                    v1 = value_node.content[set_idx + 1]
                                    v2 = value_node.content[set_idx + 2]
                                    edges.append((int(v0), int(v1), int(v2)))

                elif content_node.header == "color Color":
                    for value_node in content_node.child_nodes:
                        for value_idx in range(int(len(value_node.content) / 3)):
                            set_idx = 3 * value_idx
                            diffuse_nodes.append("%s %s %s" % (value_node.content[set_idx], value_node.content[set_idx + 1], value_node.content[set_idx + 2]))

        if not error in error_list:
            error_list.append(error)

        object.error = error
        object.diffuse_nodes = diffuse_nodes
        object.material_binding = material_binding
        object.faces = faces
        object.edges = edges
        object.points = points
        object_list.append(object)

    return error_list, object_list

def build_scene(context, WRL, report):
    # Build one Blender mesh object per distinct error type, filling it with
    # the edges/faces of every parsed Object carrying that error and creating
    # a vertex-colour-matched material per error colour.
    # NOTE(review): this function is truncated at the end of this chunk.
    if WRL.version == 1.0:
        error_list, object_list = build_object_list_old(WRL)
    elif WRL.version == 2.0:
        error_list, object_list = build_object_list_new(WRL)

    for error in error_list:
        face_idx = 0
        mesh = bpy.data.meshes.new(error)
        object_mesh = bpy.data.objects.new(error, mesh)
        context.collection.objects.link(object_mesh)
        bm = bmesh.new()
        for object in object_list:
            if object.error == error:
                if len(object.edges) > 0:
                    for edge in object.edges:
                        vert_list = []
                        # -1 is the WRL index-list terminator, not a vertex.
                        if not edge[0] == -1:
                            vert_list.append(bm.verts.new(object.points[edge[0]]))
                        if not edge[1] == -1:
                            vert_list.append(bm.verts.new(object.points[edge[1]]))
                        if not edge[2] == -1:
                            vert_list.append(bm.verts.new(object.points[edge[2]]))
                        bm.edges.new(vert_list)

                    bm.edges.ensure_lookup_table()
                    for object_edge_idx, edge in enumerate(object.edges):
                        # One diffuse colour per edge; reuse an existing
                        # material of the same name if already created.
                        color = object.diffuse_nodes[object_edge_idx]
                        r, g, b = color.split()
                        diffuse = (float(r), float(g), float(b), 1.0)
                        mat_name = get_material_name(diffuse, object.error)
                        error_mat = bpy.data.materials.get(mat_name)
                        if error_mat is None:
                            error_mat = bpy.data.materials.new(name=mat_name)
                            error_mat.diffuse_color = diffuse
                            object_mesh.data.materials.append(error_mat)
                        else:
                            if error_mat not in list(object_mesh.data.materials):
                                object_mesh.data.materials.append(error_mat)

                if len(object.faces) > 0:
                    for face in object.faces:
                        vert_list = []
                        if not face[0] == -1:
                            vert_list.append(bm.verts.new(object.points[face[0]]))
                        if not face[1] == -1:
                            vert_list.append(bm.verts.new(object.points[face[1]]))
                        if not face[2] == -1:
                            vert_list.append(bm.verts.new(object.points[face[2]]))
                        if not face[3] == -1:
        # NOTE(review): this chunk starts mid-method — the `def` line of the
        # method ending here precedes this view (PyPy typeobject.py fragment).
        return w_class, unwrap_cell(space, w_value)

    @elidable
    def _pure_lookup_where_with_method_cache(w_self, name, version_tag):
        # Method-cache-backed MRO lookup: hash (type version, name) into a
        # fixed-size direct-mapped cache; on miss, fall back to the full
        # lookup and refill the cache slot.
        space = w_self.space
        cache = space.fromcache(MethodCache)
        SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp
        SHIFT1 = SHIFT2 - 5
        version_tag_as_int = current_object_addr_as_int(version_tag)
        # ^^^Note: if the version_tag object is moved by a moving GC, the
        # existing method cache entries won't be found any more; new
        # entries will be created based on the new address. The
        # assumption is that the version_tag object won't keep moving all
        # the time - so using the fast current_object_addr_as_int() instead
        # of a slower solution like hash() is still a good trade-off.
        hash_name = compute_hash(name)
        product = intmask(version_tag_as_int * hash_name)
        method_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2
        # ^^^Note2: we used to just take product>>SHIFT2, but on 64-bit
        # platforms SHIFT2 is really large, and we loose too much information
        # that way (as shown by failures of the tests that typically have
        # method names like 'f' who hash to a number that has only ~33 bits).
        cached_version_tag = cache.versions[method_hash]
        if cached_version_tag is version_tag:
            cached_name = cache.names[method_hash]
            # identity compare is enough: names are interned strings
            if cached_name is name:
                tup = cache.lookup_where[method_hash]
                if space.config.objspace.std.withmethodcachecounter:
                    cache.hits[name] = cache.hits.get(name, 0) + 1
#                   print "hit", w_self, name
                return tup
        tup = w_self._lookup_where_all_typeobjects(name)
        cache.versions[method_hash] = version_tag
        cache.names[method_hash] = name
        cache.lookup_where[method_hash] = tup
        if space.config.objspace.std.withmethodcachecounter:
            cache.misses[name] = cache.misses.get(name, 0) + 1
#           print "miss", w_self, name
        return tup

    def check_user_subclass(w_self, w_subtype):
        # Validate that w_subtype is a proper, layout-compatible subtype of
        # w_self before allowing w_self.__new__(w_subtype, ...).
        space = w_self.space
        if not isinstance(w_subtype, W_TypeObject):
            raise operationerrfmt(space.w_TypeError,
                "X is not a type object ('%s')",
                space.type(w_subtype).getname(space))
        if not w_subtype.issubtype(w_self):
            raise operationerrfmt(space.w_TypeError,
                "%s.__new__(%s): %s is not a subtype of %s",
                w_self.name, w_subtype.name, w_subtype.name, w_self.name)
        if w_self.instancetypedef is not w_subtype.instancetypedef:
            raise operationerrfmt(space.w_TypeError,
                "%s.__new__(%s) is not safe, use %s.__new__()",
                w_self.name, w_subtype.name, w_subtype.name)
        return w_subtype

    def _freeze_(w_self):
        "NOT_RPYTHON. Forces the lazy attributes to be computed."
        if 'lazyloaders' in w_self.__dict__:
            for attr in w_self.lazyloaders.keys():
                w_self.getdictvalue(w_self.space, attr)
            del w_self.lazyloaders
        return False

    def getdict(w_self, space):
        # returning a dict-proxy!
        from pypy.objspace.std.dictproxyobject import DictProxyStrategy
        from pypy.objspace.std.dictmultiobject import W_DictMultiObject
        if w_self.lazyloaders:
            w_self._freeze_()    # force un-lazification
        strategy = space.fromcache(DictProxyStrategy)
        storage = strategy.erase(w_self)
        return W_DictMultiObject(space, strategy, storage)

    def unwrap(w_self, space):
        # Only faked cpyext types can be unwrapped to an interp-level value.
        if w_self.instancetypedef.fakedcpytype is not None:
            return w_self.instancetypedef.fakedcpytype
        from pypy.objspace.std.model import UnwrapError
        raise UnwrapError(w_self)

    def is_heaptype(w_self):
        return w_self.flag_heaptype

    def is_cpytype(w_self):
        return w_self.flag_cpytype

    def is_abstract(w_self):
        return w_self.flag_abstract

    def set_abstract(w_self, abstract):
        w_self.flag_abstract = bool(abstract)

    def issubtype(w_self, w_type):
        # When jitted and type versions are enabled, use the elidable
        # version-tag-keyed check so the JIT can constant-fold the result.
        promote(w_self)
        promote(w_type)
        if w_self.space.config.objspace.std.withtypeversion and we_are_jitted():
            version_tag1 = w_self.version_tag()
            version_tag2 = w_type.version_tag()
            if version_tag1 is not None and version_tag2 is not None:
                res = _pure_issubtype(w_self, w_type,
                                      version_tag1, version_tag2)
                return res
        return _issubtype(w_self, w_type)

    def get_module(w_self):
        space = w_self.space
        if w_self.is_heaptype() and '__module__' in w_self.dict_w:
            return w_self.getdictvalue(space, '__module__')
        else:
            # for non-heap types, CPython checks for a module.name in the
            # type name. That's a hack, so we're allowed to use a different
            # hack...
            if ('__module__' in w_self.dict_w and
                space.isinstance_w(w_self.getdictvalue(space, '__module__'),
                                   space.w_str)):
                return w_self.getdictvalue(space, '__module__')
            return space.wrap('__builtin__')

    def get_module_type_name(w_self):
        # "module.name" for non-builtin types, bare name for builtins.
        space = w_self.space
        w_mod = w_self.get_module()
        if not space.isinstance_w(w_mod, space.w_str):
            mod = '__builtin__'
        else:
            mod = space.str_w(w_mod)
        if mod !='__builtin__':
            return '%s.%s' % (mod, w_self.name)
        else:
            return w_self.name

    def add_subclass(w_self, w_subclass):
        # Track subclasses via weakrefs, reusing a dead slot if one exists.
        space = w_self.space
        if not space.config.translation.rweakref:
            return    # no weakref support, don't keep track of subclasses
        import weakref
        assert isinstance(w_subclass, W_TypeObject)
        newref = weakref.ref(w_subclass)
        for i in range(len(w_self.weak_subclasses)):
            ref = w_self.weak_subclasses[i]
            if ref() is None:
                w_self.weak_subclasses[i] = newref
                return
        else:
            w_self.weak_subclasses.append(newref)

    def remove_subclass(w_self, w_subclass):
        space = w_self.space
        if not space.config.translation.rweakref:
            return    # no weakref support, don't keep track of subclasses
        for i in range(len(w_self.weak_subclasses)):
            ref = w_self.weak_subclasses[i]
            if ref() is w_subclass:
                del w_self.weak_subclasses[i]
                return

    def get_subclasses(w_self):
        # Return the still-alive tracked subclasses.
        space = w_self.space
        if not space.config.translation.rweakref:
            msg = ("this feature requires weakrefs, "
                   "which are not available in this build of PyPy")
            raise OperationError(space.w_RuntimeError,
                                 space.wrap(msg))
        subclasses_w = []
        for ref in w_self.weak_subclasses:
            w_ob = ref()
            if w_ob is not None:
                subclasses_w.append(w_ob)
        return subclasses_w

    # for now, weakref support for W_TypeObject is hard to get automatically
    _lifeline_ = None

    def getweakref(self):
        return self._lifeline_

    def setweakref(self, space, weakreflifeline):
        self._lifeline_ = weakreflifeline

    def delweakref(self):
        self._lifeline_ = None

# ____________________________________________________________
# Initialization of type objects

def get_parent_layout(w_type):
    """Compute the most parent class of 'w_type' whose layout
    is the same as 'w_type', or None if all parents of 'w_type'
    have a different layout than 'w_type'."""
    w_starttype = w_type
    while len(w_type.bases_w) > 0:
        w_bestbase = find_best_base(w_type.space, w_type.bases_w)
        if w_type.instancetypedef is not w_bestbase.instancetypedef:
            break
        if w_type.nslots != w_bestbase.nslots:
            break
        w_type = w_bestbase
    if w_type is not w_starttype:
        return w_type
    else:
        return None

def issublayout(w_layout1, w_layout2):
    # True if w_layout1's layout derives (transitively) from w_layout2's.
    space = w_layout2.space
    while w_layout1 is not w_layout2:
        w_layout1 = find_best_base(space, w_layout1.bases_w)
        if w_layout1 is None:
            return False
        w_layout1 = w_layout1.w_same_layout_as or w_layout1
    return True

def find_best_base(space, bases_w):
    """The best base is one of the bases in the given list: the one
    whose layout a new type should use as a starting point.
    """
    w_bestbase = None
    for w_candidate in bases_w:
        if not isinstance(w_candidate, W_TypeObject):
            continue
        if w_bestbase is None:
            w_bestbase = w_candidate   # for now
            continue
        candtypedef = w_candidate.instancetypedef
        besttypedef = w_bestbase.instancetypedef
        if candtypedef is besttypedef:
            # two candidates with the same typedef are equivalent unless
            # one has extra slots over the other
            if w_candidate.nslots > w_bestbase.nslots:
                w_bestbase = w_candidate
        elif issubtypedef(candtypedef, besttypedef):
            w_bestbase = w_candidate
    return w_bestbase

def check_and_find_best_base(space, bases_w):
    """The best base is one of the bases in the given list: the one
    whose layout a new type should use as a starting point.
    This version checks that bases_w is an acceptable tuple of bases.
    """
    w_bestbase = find_best_base(space, bases_w)
    if w_bestbase is None:
        raise OperationError(space.w_TypeError,
                             space.wrap("a new-style class can't have "
                                        "only classic bases"))
    if not w_bestbase.instancetypedef.acceptable_as_base_class:
        raise operationerrfmt(space.w_TypeError,
                              "type '%s' is not an "
                              "acceptable base class",
                              w_bestbase.instancetypedef.name)
    # check that all other bases' layouts are superclasses of the bestbase
    w_bestlayout = w_bestbase.w_same_layout_as or w_bestbase
    for w_base in bases_w:
        if isinstance(w_base, W_TypeObject):
            w_layout = w_base.w_same_layout_as or w_base
            if not issublayout(w_bestlayout, w_layout):
                raise OperationError(space.w_TypeError,
                                     space.wrap("instance layout conflicts in "
                                                "multiple inheritance"))
    return w_bestbase

def copy_flags_from_bases(w_self, w_bestbase):
    # OR the dict/del/weakref flags in from every new-style base; slot count
    # comes from the best base only.  Returns True if a classic base is seen.
    hasoldstylebase = False
    for w_base in w_self.bases_w:
        if not isinstance(w_base, W_TypeObject):
            hasoldstylebase = True
            continue
        w_self.hasdict = w_self.hasdict or w_base.hasdict
        w_self.needsdel = w_self.needsdel or w_base.needsdel
        w_self.weakrefable = w_self.weakrefable or w_base.weakrefable
    w_self.nslots = w_bestbase.nslots
    return hasoldstylebase

def create_all_slots(w_self, hasoldstylebase):
    # Interpret __slots__ (absent => instances get __dict__ and __weakref__)
    # and create the corresponding member slots on the new type.
    space = w_self.space
    dict_w = w_self.dict_w
    if '__slots__' not in dict_w:
        wantdict = True
        wantweakref = True
    else:
        wantdict = False
        wantweakref = False
        w_slots = dict_w['__slots__']
        # a lone string counts as a one-element slot list
        if (space.isinstance_w(w_slots, space.w_str) or
            space.isinstance_w(w_slots, space.w_unicode)):
            slot_names_w = [w_slots]
        else:
            slot_names_w = space.unpackiterable(w_slots)
        for w_slot_name in slot_names_w:
            slot_name = space.str_w(w_slot_name)
            if slot_name == '__dict__':
                if wantdict or w_self.hasdict:
                    raise OperationError(space.w_TypeError,
                            space.wrap("__dict__ slot disallowed: "
                                       "we already got one"))
                wantdict = True
            elif slot_name == '__weakref__':
                if wantweakref or w_self.weakrefable:
                    raise OperationError(space.w_TypeError,
                            space.wrap("__weakref__ slot disallowed: "
                                       "we already got one"))
                wantweakref = True
            else:
                create_slot(w_self, slot_name)
    wantdict = wantdict or hasoldstylebase
    if wantdict:
        create_dict_slot(w_self)
    if wantweakref:
        create_weakref_slot(w_self)
    if '__del__' in dict_w:
        w_self.needsdel = True

def create_slot(w_self, slot_name):
    space = w_self.space
    if not valid_slot_name(slot_name):
        raise OperationError(space.w_TypeError,
                             space.wrap('__slots__ must be identifiers'))
    # create member
    slot_name = _mangle(slot_name, w_self.name)
    if slot_name not in w_self.dict_w:
        # Force interning of slot names.
        slot_name = space.str_w(space.new_interned_str(slot_name))
        # in cpython it is ignored less, but we probably don't care
        member = Member(w_self.nslots, slot_name, w_self)
        w_self.dict_w[slot_name] = space.wrap(member)
        w_self.nslots += 1

def create_dict_slot(w_self):
    if not w_self.hasdict:
        w_self.dict_w.setdefault('__dict__',
                                 w_self.space.wrap(std_dict_descr))
        w_self.hasdict = True

def create_weakref_slot(w_self):
    if not w_self.weakrefable:
        w_self.dict_w.setdefault('__weakref__',
                                 w_self.space.wrap(weakref_descr))
        w_self.weakrefable = True

def valid_slot_name(slot_name):
    # Accept only non-empty identifier-like names (alnum/underscore, not
    # starting with a digit).
    if len(slot_name) == 0 or slot_name[0].isdigit():
        return False
    for c in slot_name:
        if not c.isalnum() and c != '_':
            return False
    return True

def setup_user_defined_type(w_self):
    # Initialise a heap (user-defined) type: pick the best base, inherit
    # flags, create slots and common attributes.
    if len(w_self.bases_w) == 0:
        w_self.bases_w = [w_self.space.w_object]
    w_bestbase = check_and_find_best_base(w_self.space, w_self.bases_w)
    w_self.instancetypedef = w_bestbase.instancetypedef
    w_self.flag_heaptype = True
    for w_base in w_self.bases_w:
        if not isinstance(w_base, W_TypeObject):
            continue
        w_self.flag_cpytype |= w_base.flag_cpytype
        w_self.flag_abstract |= w_base.flag_abstract
    hasoldstylebase = copy_flags_from_bases(w_self, w_bestbase)
    create_all_slots(w_self, hasoldstylebase)
    ensure_common_attributes(w_self)

def setup_builtin_type(w_self):
    # Builtin types take their flags and doc directly from the typedef.
    w_self.hasdict = w_self.instancetypedef.hasdict
    w_self.weakrefable = w_self.instancetypedef.weakrefable
    w_self.w_doc = w_self.space.wrap(w_self.instancetypedef.doc)
    ensure_common_attributes(w_self)

def ensure_common_attributes(w_self):
    ensure_static_new(w_self)
    w_self.dict_w.setdefault('__doc__', w_self.w_doc)
    if w_self.is_heaptype():
        ensure_module_attr(w_self)
    w_self.mro_w = []      # temporarily
    compute_mro(w_self)

def ensure_static_new(w_self):
    # special-case __new__, as in CPython:
    # if it is a Function, turn it into a static method
    # NOTE(review): truncated at the end of this chunk.
    if '__new__' in
# NOTE(review): the literal "<filename>..." line below is a dataset
# concatenation artifact, not valid Python — it marks the start of a new
# embedded source file (pyons RFID reader model).
<filename>pyons/models/rfid/reader.py
from enum import Enum

import numpy as np
from collections import Iterable

import pyons
from pyons import Entity
from pyons.models.rfid import phy, protocol as gen2, journal, pyradise


class ReaderDescriptor(object):
    # Plain configuration bag for building a Reader: antennas, Gen2 link
    # timing (tari/rtcal/trcal), power management, session/flag policy and
    # channel/BER model.  Time values are in seconds, powers in dBm.
    def __init__(self):
        super().__init__()
        self.antennas = []
        self.rounds_per_antenna = 1
        self.tari = 6.25e-6
        # rtcal/trcal may alternatively be derived from the multipliers
        # below (see Reader.__init__); multipliers take precedence.
        self.rtcal = 18.75e-6
        self.data0_multiplier = None
        self.trcal = 33.5e-6
        self.rtcal_multiplier = None
        self.tx_power = 31.5
        self.circulator_noise = -80.0
        self.switch_power = False
        self.power_on_interval = 2.0
        self.power_off_interval = 0.1
        self.sl = gen2.Sel.SL_ALL
        self.session = gen2.Session.S0
        self.session_strategy = Reader.SessionStrategy.ONLY_A
        self.rounds_per_inventory_flag = 1
        self.frequency = 860e6
        self.modulation_loss = 0.0
        self.tag_encoding = gen2.TagEncoding.FM0
        self.dr = gen2.DR.DR_8
        self.trext = False
        self.q = 2
        self.channel = None
        self.ber_model = pyradise.ber_over_rayleigh
        self.read_tid = True


# Simulation lifecycle stages: (order, description).
INIT_READER_STAGE = (0, "Init reader")
FINISH_READER_STAGE = (9, "Finish reader")


class Reader(phy.Node):
    # Discrete-event model of an EPC Gen2 RFID reader: runs inventory
    # rounds, switches antennas/inventory flags, manages power cycling and
    # records round/slot statistics to the journal.
    # NOTE(review): truncated at the end of this chunk (mid _handle_slot_end).
    DELAYED_SEND_EVENT = "delayed send"
    REPLY_TIMEOUT_EVENT = "reply timeout"
    POWER_ON_TIMEOUT_EVENT = "power on timeout"
    POWER_OFF_TIMEOUT_EVENT = "power off timeout"

    class SessionStrategy(Enum):
        # Which inventory flag (A/B) rounds target: fixed A, fixed B, or
        # alternating every rounds_per_inventory_flag rounds.
        ONLY_A = 0
        ONLY_B = 1
        ALTER = 2

        @staticmethod
        def parse(s: str):
            if s == "A" or s == "a":
                return Reader.SessionStrategy.ONLY_A
            elif s == "B" or s == "b":
                return Reader.SessionStrategy.ONLY_B
            elif s.upper() == "AB" or s.upper() == "ALTER":
                return Reader.SessionStrategy.ALTER
            raise ValueError(f"Unrecognized session strategy '{s}'")

    def __init__(self, descriptor):
        super().__init__()
        assert isinstance(descriptor, ReaderDescriptor)
        self.antennas = descriptor.antennas
        for antenna in self.antennas:
            antenna.node = self
        self.rounds_per_antenna = descriptor.rounds_per_antenna
        self.tari = descriptor.tari
        # Derived Gen2 timings: multipliers override the absolute values.
        self.rtcal = (descriptor.rtcal if descriptor.data0_multiplier is None
                      else (descriptor.data0_multiplier + 1) * descriptor.tari)
        self.trcal = (descriptor.trcal if descriptor.rtcal_multiplier is None
                      else self.rtcal * descriptor.rtcal_multiplier)
        self.max_tx_power = descriptor.tx_power
        self.frequency = descriptor.frequency
        self.circulator_noise = descriptor.circulator_noise
        self.switch_power = descriptor.switch_power
        self.power_on_interval = descriptor.power_on_interval
        self.power_off_interval = descriptor.power_off_interval
        self.channel = descriptor.channel
        self.read_tid = descriptor.read_tid
        self.sl = descriptor.sl
        self.session = descriptor.session
        self.session_strategy = descriptor.session_strategy
        self.rounds_per_inventory_flag = descriptor.rounds_per_inventory_flag
        self.m = descriptor.tag_encoding
        self.dr = descriptor.dr
        self.trext = descriptor.trext
        self.q = descriptor.q
        decider = phy.ReaderDecider(None, descriptor.channel,
                                    descriptor.ber_model)
        self._transceiver = phy.Transceiver(
            node=self, decider=decider, channel=descriptor.channel,
            modulation_loss=descriptor.modulation_loss)
        decider.transceiver = self._transceiver
        self._round_index = 0
        self._power = None               # None <=> powered off
        self._antenna_index = 0
        self._last_powered_on = -np.inf
        self._last_powered_off = -np.inf
        self._last_tx_end = -np.inf
        self._last_rx_end = -np.inf
        self._inventory_flag = None
        self._reply_timeout_id = None
        self._delayed_send_timeout_id = None
        self._power_on_timeout_id = None
        self._power_off_timeout_id = None
        self._slot = None
        # Round and slot info caches
        self.round_info = None
        self.tag_read_data = None
        self._round_started_at = None
        self._slot_started_at = None
        self._slot_durations = []

    ###################################################################
    # PROPERTIES
    ###################################################################
    @property
    def transceiver(self):
        return self._transceiver

    @property
    def speed(self):
        return 0.0

    @property
    def direction(self):
        return np.array([1, 0, 0])

    @property
    def velocity(self):
        return np.array([0, 0, 0])

    @property
    def node_type(self):
        return phy.NodeType.READER_NODE

    @property
    def last_powered_on(self):
        return self._last_powered_on

    @property
    def name(self):
        return "Reader"

    @property
    def radiated_power(self):
        # TX power at the antenna (dBm), or None when powered off.
        return (None if self._power is None
                else self._power + self.antenna.cable_loss)

    @property
    def is_powered_on(self):
        return self._power is not None

    @property
    def antenna(self):
        return self.antennas[self._antenna_index]

    @property
    def last_powered_off(self):
        return self._last_powered_off

    @property
    def wavelen(self):
        return phy.SPEED_OF_LIGHT / self.frequency

    @property
    def blf(self):
        # Backscatter link frequency: DR / TRcal per the Gen2 spec.
        return self.dr.ratio / self.trcal

    @property
    def data0(self):
        return self.tari

    @property
    def data1(self):
        return self.rtcal - self.data0

    ###################################################################
    # Public API
    ###################################################################
    @Entity.managed
    def power_on(self):
        # Energize the field, reset the inventory flag to A, notify passive
        # (tag) transceivers, schedule auto power-off and start a round.
        if self.is_powered_on:
            return
        # pyons.fine("powered on", sender=self.name)
        self._cancel_power_timeout()
        self._power = self.max_tx_power
        self._last_powered_on = pyons.time()
        self._inventory_flag = gen2.InventoryFlag.A
        self.transceiver.set_power(self._power)
        for transceiver in self.channel.passive_transceivers:
            transceiver.node.update_received_power()
        if self.power_on_interval is not None:
            self._power_off_timeout_id = pyons.create_timeout(
                self.power_on_interval, Reader.POWER_OFF_TIMEOUT_EVENT)
        self._handle_round_start()

    @Entity.managed
    def power_off(self):
        # Drop the field, cancel all pending timers, notify tags and
        # schedule auto power-on.
        if not self.is_powered_on:
            return
        # pyons.fine("powered off", sender=self.name)
        self._power = None
        self._last_powered_off = pyons.time()
        self._inventory_flag = None
        self._cancel_power_timeout()
        self._cancel_delayed_send()
        self._cancel_reply_timeout()
        self.transceiver.set_power(None)
        self.transceiver.clear()
        for transceiver in self.channel.passive_transceivers:
            transceiver.node.update_received_power()
        if self.power_off_interval is not None:
            self._power_on_timeout_id = pyons.create_timeout(
                self.power_off_interval, Reader.POWER_ON_TIMEOUT_EVENT)

    ###################################################################
    # API for the Transceiver
    ###################################################################
    @property
    def turned_on(self):
        return self.is_powered_on

    @Entity.managed
    def send_finished(self):
        # After TX, arm the reply timeout (T1 max + T3 per Gen2 timing).
        self._last_tx_end = pyons.time()
        dt = gen2.max_t1(rtcal=self.rtcal, blf=self.blf) + gen2.t3()
        self._reply_timeout_id = pyons.create_timeout(
            dt, Reader.REPLY_TIMEOUT_EVENT)

    @Entity.managed
    def receive_started(self):
        self._cancel_reply_timeout()

    def receive_error(self, snr, ber):
        assert isinstance(self.round_info, journal.InventoryRoundRecord)
        self.round_info.n_errors += 1

    def receive_collision(self, n_replies):
        assert isinstance(self.round_info, journal.InventoryRoundRecord)
        self.round_info.n_collisions += 1

    @Entity.managed
    def receive_finished(self, frame, rx_power, snr=None, ber=None):
        # frame is None on a decode failure: re-arm the timeout and wait.
        if frame is None:
            # pyons.fine("receive error {snr}{ber}".format(
            #     snr=('' if snr is None else ' snr={:.2f}dB'.format(snr)),
            #     ber=('' if ber is None else ' ber={:.2f}'.format(ber))),
            #     sender=self.name)
            self._reply_timeout_id = pyons.create_timeout(
                gen2.max_t2(self.blf), Reader.REPLY_TIMEOUT_EVENT)
            return
        assert isinstance(frame, gen2.TagFrame)
        # pyons.fine("received: {reply}, power={power}dBm{snr}{ber}".format(
        #     reply=frame.reply, power=rx_power,
        #     snr=('' if snr is None else ' snr={:.2f}dB'.format(snr)),
        #     ber=('' if ber is None else ' ber={:.2f}'.format(ber))),
        #     sender=self.name)
        self._cancel_reply_timeout()
        self._cancel_delayed_send()
        self._last_rx_end = pyons.time()
        # Dispatch on the Gen2 reply type.
        reply = frame.reply
        if isinstance(reply, gen2.Rn16Reply):
            self._handle_rn16_reply(reply)
        elif isinstance(reply, gen2.AckReply):
            self._handle_ack_reply(reply)
        elif isinstance(reply, gen2.ReqRnReply):
            self._handle_reqrn_reply(reply)
        elif isinstance(reply, gen2.ReadReply):
            self._handle_read_reply(reply)
        else:
            raise RuntimeError(
                "reader doesn't support reply {}".format(type(reply)))

    ###################################################################
    # INITIALIZERS
    ###################################################################
    @Entity.initializer(stage=INIT_READER_STAGE)
    def _initialize(self):
        # Power up, register the transceiver entity and write static
        # reader/antenna configuration records to the journal.
        if len(self.antennas) == 0:
            raise RuntimeError("no antennas attached to the reader")
        self._antenna_index = 0
        self._round_index = 0
        self.power_on()
        pyons.add_entity(self.transceiver)
        reader_info = journal.ReaderInfoRecord()
        reader_info.n_antennas = len(self.antennas)
        reader_info.tx_power = self.max_tx_power
        reader_info.circulator_noise = self.circulator_noise
        reader_info.power_switch_enabled = self.switch_power
        reader_info.session_switch_enabled = (
            self.session_strategy == Reader.SessionStrategy.ALTER)
        reader_info.power_on_duration = self.power_on_interval
        reader_info.power_off_duration = self.power_off_interval
        reader_info.n_rounds_per_antenna = self.rounds_per_antenna
        reader_info.n_rounds_per_session = self.rounds_per_inventory_flag
        reader_info.n_slots = 2 ** self.q
        reader_info.q = self.q
        reader_info.m = self.m
        reader_info.trext = self.trext
        reader_info.dr = self.dr
        reader_info.blf = self.blf
        reader_info.data0 = self.data0
        reader_info.data1 = self.data1
        reader_info.rtcal = self.rtcal
        reader_info.trcal = self.trcal
        journal.Journal().write_reader_info(reader_info)
        for antenna_index in range(0, len(self.antennas)):
            antenna = self.antennas[antenna_index]
            assert isinstance(antenna, phy.ReaderAntenna)
            antenna_info = journal.ReaderAntennaInfoRecord()
            antenna_info.index = antenna_index
            antenna_info.side = antenna.side
            antenna_info.lane = antenna.lane
            antenna_info.position = antenna.position
            antenna_info.forward_dir = np.array(antenna.dir_forward)
            antenna_info.right_dir = np.array(antenna.dir_right)
            antenna_info.gain = antenna.gain
            antenna_info.rp = antenna.rp
            antenna_info.polarization = antenna.polarization
            antenna_info.cable_loss = antenna.cable_loss
            journal.Journal().write_reader_antenna_info(antenna_info)

    ###################################################################
    # FINALIZERS
    ###################################################################
    @Entity.finalizer(stage=FINISH_READER_STAGE)
    def _finish(self):
        self.transceiver.clear()
        self.power_off()
        pyons.remove_entity(self.transceiver)

    ###################################################################
    # DEATH CONDITIONS
    ###################################################################

    ###################################################################
    # STOP CONDITIONS
    ###################################################################

    ###################################################################
    # EVENT HANDLERS
    ###################################################################
    @Entity.eventhandler(lambda ev, src: (isinstance(ev, Iterable) and
                                          ev[0] == Reader.DELAYED_SEND_EVENT))
    def _send_delayed(self, event, source):
        # Fires when the mandatory T2 gap after a tag reply has elapsed.
        assert source is self
        msg, frame = event
        self.transceiver.send(frame)
        self._delayed_send_timeout_id = None

    @Entity.eventhandler(lambda ev, src: ev == Reader.REPLY_TIMEOUT_EVENT)
    def _handle_reply_timeout(self, event, source):
        # No tag answered in time: count an empty slot and move on.
        assert event == Reader.REPLY_TIMEOUT_EVENT and source is self
        # pyons.fine("no reply received", sender=self.name)
        self._reply_timeout_id = None
        # STATISTICS ====>
        if self.tag_read_data.epc is None:
            self.round_info.n_empty_slots += 1
        # <===== END OF STATISTICS
        self._handle_slot_end()

    @Entity.eventhandler(lambda ev, src: ev == Reader.POWER_ON_TIMEOUT_EVENT)
    def _handle_power_on_timeout(self, event, source):
        assert event == Reader.POWER_ON_TIMEOUT_EVENT and source is self
        self._power_on_timeout_id = None
        self.power_on()

    @Entity.eventhandler(lambda ev, src: ev == Reader.POWER_OFF_TIMEOUT_EVENT)
    def _handle_power_off_timeout(self, event, source):
        assert event == Reader.POWER_OFF_TIMEOUT_EVENT and source is self
        self._power_off_timeout_id = None
        self.power_off()

    ###################################################################
    # INTERNAL API
    ###################################################################
    def _send(self, cmd):
        # Wrap a Gen2 command in a frame (Query gets the full preamble,
        # everything else a sync) and send it, delaying if the minimum T2
        # gap since the last tag reply has not yet elapsed.
        if isinstance(cmd, gen2.Query):
            preamble = gen2.ReaderFrame.Preamble(
                tari=self.tari, rtcal=self.rtcal, trcal=self.trcal)
        else:
            preamble = gen2.ReaderFrame.Sync(tari=self.tari, rtcal=self.rtcal)
        frame = gen2.ReaderFrame(preamble=preamble, cmd=cmd)
        # pyons.fine("send: {cmd}, duration={duration:.2f}us".format(
        #     cmd=cmd, duration=frame.duration*1e6), sender=self.name)
        self._cancel_delayed_send()
        t2 = gen2.min_t2(self.blf)
        time_from_rx_end = pyons.time() - self._last_rx_end
        if time_from_rx_end >= t2:
            self.transceiver.send(frame)
        else:
            dt = t2 - time_from_rx_end
            self._delayed_send_timeout_id = pyons.create_timeout(
                dt, (Reader.DELAYED_SEND_EVENT, frame))

    #
    # Inventory round pseudo-events handlers
    #
    def _handle_round_start(self):
        # Begin a new inventory round: rotate antenna / inventory flag per
        # policy, refresh tag channel state and statistics, then send Query.
        if not self.is_powered_on:
            raise RuntimeError("round start during power-off")
        self._round_index += 1
        self._slot = 0
        #
        # Switching antenna if needed
        #
        if self.rounds_per_antenna is not None and (
                self._round_index % self.rounds_per_antenna == 0):
            n_antennas = len(self.antennas)
            self._antenna_index = (self._antenna_index + 1) % n_antennas
        #
        # Switching inventory flag if needed
        #
        if self.session_strategy == Reader.SessionStrategy.ALTER:
            if (self.rounds_per_inventory_flag is not None) and \
                    (self._round_index % self.rounds_per_inventory_flag == 0):
                self._inventory_flag = self._inventory_flag.invert()
        # pyons.fine(("\n\t*---------- NEW ROUND ------------*"
        #             "\n\t* round index : {round_index}"
        #             "\n\t* antenna index: {antenna_index}"
        #             "\n\t* Q            : {q}"
        #             "\n\t* session      : {session}"
        #             "\n\t* target       : {target}").format(
        #     round_index=self._round_index, antenna_index=self._antenna_index,
        #     q=self.q, session=self.session, target=self._inventory_flag),
        #     sender=self.name)
        for transceiver in self.channel.passive_transceivers:
            transceiver.node.update_position()
            transceiver.node.update_received_power()
        #
        # Refresh statistics
        #
        # STATISTICS ====>
        self.tag_read_data = self._create_tag_read_data()
        self.round_info = self._create_round_info()
        self._slot_durations = []
        self._slot_started_at = pyons.time()
        self._round_started_at = pyons.time()
        model = pyons.get_model()
        self.round_info.n_tags = len([tag for tag in model.tags
                                      if tag.energized])
        self.round_info.n_vehicles_registered = len(model.vehicles)
        self.round_info.antenna_index = self._antenna_index
        # <===== END OF STATISTICS
        #
        # Generating and sending Query
        #
        query = gen2.Query(dr=self.dr, m=self.m, trext=self.trext,
                           sel=self.sl, session=self.session,
                           target=self._inventory_flag, q=self.q)
        self._send(query)

    def _handle_round_end(self):
        # Flush round statistics to the journal and immediately start the
        # next round.
        # STATISTICS ====>
        assert isinstance(self.round_info, journal.InventoryRoundRecord)
        self.round_info.duration = pyons.time() - self._round_started_at
        self.round_info.min_slot_duration = np.min(self._slot_durations)
        self.round_info.max_slot_duration = np.max(self._slot_durations)
        self.round_info.avg_slot_duration = np.average(self._slot_durations)
        journal.Journal().write_inventory_round(self.round_info)
        self.round_info = None
        self._slot_durations = None
        # <===== END OF STATISTICS
        self._handle_round_start()

    def _handle_slot_start(self):
        # pyons.fine("start slot #{}".format(self._slot), sender=self.name)
        # STATISTICS ====>
        self.tag_read_data = self._create_tag_read_data()
        self._slot_started_at = pyons.time()
        # <===== END OF STATISTICS
        if not self.is_powered_on:
            raise RuntimeError("slot begin during power-off")
        qrep = gen2.QueryRep(session=self.session)
        self._send(qrep)

    def _handle_slot_end(self):
        # NOTE(review): truncated at the end of this chunk.
        # STATISTICS ====>
        assert isinstance(self.tag_read_data,